forked from M-Labs/nalgebra
Merge branch 'dev' into matrixmarket-io
commit 030f155dc3

CHANGELOG.md (22)
@@ -4,7 +4,29 @@ documented here.
This project adheres to [Semantic Versioning](https://semver.org/).

+## [0.31.0] (30 Apr. 2022)
+
+### Breaking changes
+
+- Switch to `cust` 0.3 (for CUDA support).
+- Switch to `rkyv` 0.7
+- Remove support for serialization based on `abomonation`.
+- Remove support for conversions between `nalgebra` types and `glam` 0.13.
+
+### Modified
+
+- The aliases for `Const` types have been simplified to help `rust-analyzer`.
+
+### Added
+
+- Add `TryFrom` conversion between `UnitVector2/3/4` and `glam`’s `Vec2/3/4`.
+- `nalgebra-sparse`: added support for serialization of sparse matrices with `serde`.
+- `nalgebra-sparse`: add a CSC matrix constructor from unsorted (but valid) data.
+- `nalgebra-lapack`: add generalized eigenvalues/eigenvectors calculation + QZ decomposition.
+
+### Fixed
+
+- Improve stability of SVD.
+- Fix slerp for `UnitComplex`.
+
## [0.30.1] (09 Jan. 2022)

### Added

- Add conversion from/to types of `glam` 0.19 and 0.20.
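Aside (not part of the diff): the `nalgebra-sparse` serde entry above can be exercised with a minimal sketch like the one below. It assumes `nalgebra-sparse` is built with its serde support enabled and that `serde_json` is available as a dependency; neither assumption is shown in this commit.

```rust
use nalgebra_sparse::coo::CooMatrix;
use nalgebra_sparse::csr::CsrMatrix;

fn main() {
    // 3x3 matrix with three explicit entries.
    let mut coo = CooMatrix::<f64>::new(3, 3);
    coo.push(0, 0, 1.0);
    coo.push(1, 2, 2.0);
    coo.push(2, 1, 3.0);
    let csr = CsrMatrix::from(&coo);

    // Round-trip through JSON; pattern and values survive.
    let json = serde_json::to_string(&csr).unwrap();
    let csr2: CsrMatrix<f64> = serde_json::from_str(&json).unwrap();
    assert_eq!(csr2.nnz(), csr.nnz());
}
```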
@@ -1,6 +1,6 @@
[package]
name = "nalgebra"
-version = "0.30.1"
+version = "0.31.0"
authors = [ "Sébastien Crozet <developer@crozet.re>" ]

description = "General-purpose linear algebra library with transformations and statically-sized or dynamically-sized matrices."
@@ -37,7 +37,6 @@ cuda = [ "cust_core", "simba/cuda" ]
# Conversion
convert-mint = [ "mint" ]
convert-bytemuck = [ "bytemuck" ]
-convert-glam013 = [ "glam013" ]
convert-glam014 = [ "glam014" ]
convert-glam015 = [ "glam015" ]
convert-glam016 = [ "glam016" ]
@@ -54,7 +53,7 @@ convert-glam020 = [ "glam020" ]
serde-serialize-no-std = [ "serde", "num-complex/serde" ]
serde-serialize = [ "serde-serialize-no-std", "serde/std" ]
rkyv-serialize-no-std = [ "rkyv" ]
-rkyv-serialize = [ "rkyv-serialize-no-std", "rkyv/std" ]
+rkyv-serialize = [ "rkyv-serialize-no-std", "rkyv/std", "bytecheck" ]

# Randomness
## To use rand in a #[no-std] environment, enable the
@@ -80,7 +79,8 @@ alga = { version = "0.9", default-features = false, optional = true }
rand_distr = { version = "0.4", default-features = false, optional = true }
matrixmultiply = { version = "0.3", optional = true }
serde = { version = "1.0", default-features = false, features = [ "derive" ], optional = true }
-rkyv = { version = "~0.6.4", default-features = false, features = ["const_generics"], optional = true }
+rkyv = { version = "~0.7.1", optional = true }
+bytecheck = { version = "~0.6.1", optional = true }
mint = { version = "0.5", optional = true }
quickcheck = { version = "1", optional = true }
pest = { version = "2", optional = true }
@@ -88,7 +88,6 @@ pest_derive = { version = "2", optional = true }
bytemuck = { version = "1.5", optional = true }
matrixcompare-core = { version = "0.1", optional = true }
proptest = { version = "1", optional = true, default-features = false, features = ["std"] }
-glam013 = { package = "glam", version = "0.13", optional = true }
glam014 = { package = "glam", version = "0.14", optional = true }
glam015 = { package = "glam", version = "0.15", optional = true }
glam016 = { package = "glam", version = "0.16", optional = true }
@@ -4,7 +4,7 @@ version = "0.0.0"
authors = [ "You" ]

[dependencies]
-nalgebra = "0.30.0"
+nalgebra = "0.31.0"

[[bin]]
name = "example"
@@ -1,6 +1,6 @@
[package]
name = "nalgebra-glm"
-version = "0.16.0"
+version = "0.17.0"
authors = ["sebcrozet <developer@crozet.re>"]

description = "A computer-graphics oriented API for nalgebra, inspired by the C++ GLM library."
@@ -26,7 +26,6 @@ cuda = [ "nalgebra/cuda" ]
# Conversion
convert-mint = [ "nalgebra/mint" ]
convert-bytemuck = [ "nalgebra/bytemuck" ]
-convert-glam013 = [ "nalgebra/glam013" ]
convert-glam014 = [ "nalgebra/glam014" ]
convert-glam015 = [ "nalgebra/glam015" ]
convert-glam016 = [ "nalgebra/glam016" ]
@@ -37,4 +36,4 @@ convert-glam018 = [ "nalgebra/glam018" ]
num-traits = { version = "0.2", default-features = false }
approx = { version = "0.5", default-features = false }
simba = { version = "0.7", default-features = false }
-nalgebra = { path = "..", version = "0.30", default-features = false }
+nalgebra = { path = "..", version = "0.31", default-features = false }
@@ -1,6 +1,6 @@
[package]
name = "nalgebra-lapack"
-version = "0.21.0"
+version = "0.22.0"
authors = [ "Sébastien Crozet <developer@crozet.re>", "Andrew Straw <strawman@astraw.com>" ]

description = "Matrix decompositions using nalgebra matrices and Lapack bindings."
@@ -29,7 +29,7 @@ accelerate = ["lapack-src/accelerate"]
intel-mkl = ["lapack-src/intel-mkl"]

[dependencies]
-nalgebra = { version = "0.30", path = ".." }
+nalgebra = { version = "0.31", path = ".." }
num-traits = "0.2"
num-complex = { version = "0.4", default-features = false }
simba = "0.7"
@@ -39,7 +39,7 @@ lapack-src = { version = "0.8", default-features = false }
# clippy = "*"

[dev-dependencies]
-nalgebra = { version = "0.30", features = [ "arbitrary", "rand" ], path = ".." }
+nalgebra = { version = "0.31", features = [ "arbitrary", "rand" ], path = ".." }
proptest = { version = "1", default-features = false, features = ["std"] }
quickcheck = "1"
approx = "0.5"
nalgebra-lapack/src/generalized_eigenvalues.rs (new file, 350 lines)
@@ -0,0 +1,350 @@
#[cfg(feature = "serde-serialize")]
use serde::{Deserialize, Serialize};

use num::Zero;
use num_complex::Complex;

use simba::scalar::RealField;

use crate::ComplexHelper;
use na::allocator::Allocator;
use na::dimension::{Const, Dim};
use na::{DefaultAllocator, Matrix, OMatrix, OVector, Scalar};

use lapack;

/// Generalized eigenvalues and generalized eigenvectors (left and right) of a pair of N*N real square matrices.
///
/// Each generalized eigenvalue (lambda) satisfies determinant(A - lambda*B) = 0.
///
/// The right eigenvector v(j) corresponding to the eigenvalue lambda(j)
/// of (A,B) satisfies
///
/// A * v(j) = lambda(j) * B * v(j).
///
/// The left eigenvector u(j) corresponding to the eigenvalue lambda(j)
/// of (A,B) satisfies
///
/// u(j)**H * A = lambda(j) * u(j)**H * B,
/// where u(j)**H is the conjugate-transpose of u(j).
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
#[cfg_attr(
    feature = "serde-serialize",
    serde(
        bound(serialize = "DefaultAllocator: Allocator<T, D, D> + Allocator<T, D>,
           OVector<T, D>: Serialize,
           OMatrix<T, D, D>: Serialize")
    )
)]
#[cfg_attr(
    feature = "serde-serialize",
    serde(
        bound(deserialize = "DefaultAllocator: Allocator<T, D, D> + Allocator<T, D>,
           OVector<T, D>: Deserialize<'de>,
           OMatrix<T, D, D>: Deserialize<'de>")
    )
)]
#[derive(Clone, Debug)]
pub struct GeneralizedEigen<T: Scalar, D: Dim>
where
    DefaultAllocator: Allocator<T, D> + Allocator<T, D, D>,
{
    alphar: OVector<T, D>,
    alphai: OVector<T, D>,
    beta: OVector<T, D>,
    vsl: OMatrix<T, D, D>,
    vsr: OMatrix<T, D, D>,
}

impl<T: Scalar + Copy, D: Dim> Copy for GeneralizedEigen<T, D>
where
    DefaultAllocator: Allocator<T, D, D> + Allocator<T, D>,
    OMatrix<T, D, D>: Copy,
    OVector<T, D>: Copy,
{
}

impl<T: GeneralizedEigenScalar + RealField + Copy, D: Dim> GeneralizedEigen<T, D>
where
    DefaultAllocator: Allocator<T, D, D> + Allocator<T, D>,
{
    /// Attempts to compute the generalized eigenvalues, and left and right associated eigenvectors
    /// via the raw returns from LAPACK's dggev and sggev routines.
    ///
    /// Panics if the method did not converge.
    pub fn new(a: OMatrix<T, D, D>, b: OMatrix<T, D, D>) -> Self {
        Self::try_new(a, b).expect("Calculation of generalized eigenvalues failed.")
    }

    /// Attempts to compute the generalized eigenvalues (and eigenvectors) via the raw returns from LAPACK's
    /// dggev and sggev routines.
    ///
    /// Returns `None` if the method did not converge.
    pub fn try_new(mut a: OMatrix<T, D, D>, mut b: OMatrix<T, D, D>) -> Option<Self> {
        assert!(
            a.is_square() && b.is_square(),
            "Unable to compute the generalized eigenvalues of non-square matrices."
        );

        assert!(
            a.shape_generic() == b.shape_generic(),
            "Unable to compute the generalized eigenvalues of two square matrices of different dimensions."
        );

        let (nrows, ncols) = a.shape_generic();
        let n = nrows.value();

        let mut info = 0;

        let mut alphar = Matrix::zeros_generic(nrows, Const::<1>);
        let mut alphai = Matrix::zeros_generic(nrows, Const::<1>);
        let mut beta = Matrix::zeros_generic(nrows, Const::<1>);
        let mut vsl = Matrix::zeros_generic(nrows, ncols);
        let mut vsr = Matrix::zeros_generic(nrows, ncols);

        let lwork = T::xggev_work_size(
            b'V',
            b'V',
            n as i32,
            a.as_mut_slice(),
            n as i32,
            b.as_mut_slice(),
            n as i32,
            alphar.as_mut_slice(),
            alphai.as_mut_slice(),
            beta.as_mut_slice(),
            vsl.as_mut_slice(),
            n as i32,
            vsr.as_mut_slice(),
            n as i32,
            &mut info,
        );
        lapack_check!(info);

        let mut work = vec![T::zero(); lwork as usize];

        T::xggev(
            b'V',
            b'V',
            n as i32,
            a.as_mut_slice(),
            n as i32,
            b.as_mut_slice(),
            n as i32,
            alphar.as_mut_slice(),
            alphai.as_mut_slice(),
            beta.as_mut_slice(),
            vsl.as_mut_slice(),
            n as i32,
            vsr.as_mut_slice(),
            n as i32,
            &mut work,
            lwork,
            &mut info,
        );
        lapack_check!(info);

        Some(GeneralizedEigen {
            alphar,
            alphai,
            beta,
            vsl,
            vsr,
        })
    }

    /// Calculates the generalized eigenvectors (left and right) associated with the generalized eigenvalues.
    ///
    /// Outputs two matrices.
    /// The first output matrix contains the left eigenvectors of the generalized eigenvalues
    /// as columns.
    /// The second matrix contains the right eigenvectors of the generalized eigenvalues
    /// as columns.
    pub fn eigenvectors(&self) -> (OMatrix<Complex<T>, D, D>, OMatrix<Complex<T>, D, D>)
    where
        DefaultAllocator:
            Allocator<Complex<T>, D, D> + Allocator<Complex<T>, D> + Allocator<(Complex<T>, T), D>,
    {
        /*
         How the eigenvectors are built up:

         Since the input entries are all real, any complex generalized eigenvalues come in pairs
         as a consequence of the [complex conjugate root theorem](https://en.wikipedia.org/wiki/Complex_conjugate_root_theorem).
         The LAPACK routine output reflects this by expecting the user to unpack the eigenvectors
         associated with the real and complex eigenvalues from the real matrix output via the following procedure.

         (Note: VL stands for the LAPACK real matrix output containing the left eigenvectors as columns,
         VR stands for the LAPACK real matrix output containing the right eigenvectors as columns.)

         If the j-th and (j+1)-th eigenvalues form a complex conjugate pair,
         then

          u(j) = VL(:,j)+i*VL(:,j+1)
          u(j+1) = VL(:,j)-i*VL(:,j+1)

         and

          v(j) = VR(:,j)+i*VR(:,j+1)
          v(j+1) = VR(:,j)-i*VR(:,j+1).
        */

        let n = self.vsl.shape().0;

        let mut l = self.vsl.map(|x| Complex::new(x, T::RealField::zero()));

        let mut r = self.vsr.map(|x| Complex::new(x, T::RealField::zero()));

        let eigenvalues = self.raw_eigenvalues();

        let mut c = 0;

        while c < n {
            if eigenvalues[c].0.im.abs() != T::RealField::zero() && c + 1 < n {
                // taking care of the left eigenvector matrix
                l.column_mut(c).zip_apply(&self.vsl.column(c + 1), |r, i| {
                    *r = Complex::new(r.re.clone(), i.clone());
                });
                l.column_mut(c + 1).zip_apply(&self.vsl.column(c), |i, r| {
                    *i = Complex::new(r.clone(), -i.re.clone());
                });

                // taking care of the right eigenvector matrix
                r.column_mut(c).zip_apply(&self.vsr.column(c + 1), |r, i| {
                    *r = Complex::new(r.re.clone(), i.clone());
                });
                r.column_mut(c + 1).zip_apply(&self.vsr.column(c), |i, r| {
                    *i = Complex::new(r.clone(), -i.re.clone());
                });

                c += 2;
            } else {
                c += 1;
            }
        }

        (l, r)
    }

    /// Outputs the unprocessed (almost) version of generalized eigenvalues ((alphar, alphai), beta)
    /// straight from LAPACK.
    #[must_use]
    pub fn raw_eigenvalues(&self) -> OVector<(Complex<T>, T), D>
    where
        DefaultAllocator: Allocator<(Complex<T>, T), D>,
    {
        let mut out = Matrix::from_element_generic(
            self.vsl.shape_generic().0,
            Const::<1>,
            (Complex::zero(), T::RealField::zero()),
        );

        for i in 0..out.len() {
            out[i] = (Complex::new(self.alphar[i], self.alphai[i]), self.beta[i])
        }

        out
    }
}

/*
 *
 * Lapack functions dispatch.
 *
 */
/// Trait implemented by scalars for which Lapack implements the RealField GeneralizedEigen decomposition.
pub trait GeneralizedEigenScalar: Scalar {
    #[allow(missing_docs)]
    fn xggev(
        jobvsl: u8,
        jobvsr: u8,
        n: i32,
        a: &mut [Self],
        lda: i32,
        b: &mut [Self],
        ldb: i32,
        alphar: &mut [Self],
        alphai: &mut [Self],
        beta: &mut [Self],
        vsl: &mut [Self],
        ldvsl: i32,
        vsr: &mut [Self],
        ldvsr: i32,
        work: &mut [Self],
        lwork: i32,
        info: &mut i32,
    );

    #[allow(missing_docs)]
    fn xggev_work_size(
        jobvsl: u8,
        jobvsr: u8,
        n: i32,
        a: &mut [Self],
        lda: i32,
        b: &mut [Self],
        ldb: i32,
        alphar: &mut [Self],
        alphai: &mut [Self],
        beta: &mut [Self],
        vsl: &mut [Self],
        ldvsl: i32,
        vsr: &mut [Self],
        ldvsr: i32,
        info: &mut i32,
    ) -> i32;
}

macro_rules! generalized_eigen_scalar_impl (
    ($N: ty, $xggev: path) => (
        impl GeneralizedEigenScalar for $N {
            #[inline]
            fn xggev(jobvsl: u8,
                     jobvsr: u8,
                     n: i32,
                     a: &mut [$N],
                     lda: i32,
                     b: &mut [$N],
                     ldb: i32,
                     alphar: &mut [$N],
                     alphai: &mut [$N],
                     beta : &mut [$N],
                     vsl: &mut [$N],
                     ldvsl: i32,
                     vsr: &mut [$N],
                     ldvsr: i32,
                     work: &mut [$N],
                     lwork: i32,
                     info: &mut i32) {
                unsafe { $xggev(jobvsl, jobvsr, n, a, lda, b, ldb, alphar, alphai, beta, vsl, ldvsl, vsr, ldvsr, work, lwork, info); }
            }

            #[inline]
            fn xggev_work_size(jobvsl: u8,
                               jobvsr: u8,
                               n: i32,
                               a: &mut [$N],
                               lda: i32,
                               b: &mut [$N],
                               ldb: i32,
                               alphar: &mut [$N],
                               alphai: &mut [$N],
                               beta : &mut [$N],
                               vsl: &mut [$N],
                               ldvsl: i32,
                               vsr: &mut [$N],
                               ldvsr: i32,
                               info: &mut i32)
                               -> i32 {
                let mut work = [ Zero::zero() ];
                let lwork = -1 as i32;

                unsafe { $xggev(jobvsl, jobvsr, n, a, lda, b, ldb, alphar, alphai, beta, vsl, ldvsl, vsr, ldvsr, &mut work, lwork, info); }
                ComplexHelper::real_part(work[0]) as i32
            }
        }
    )
);

generalized_eigen_scalar_impl!(f32, lapack::sggev);
generalized_eigen_scalar_impl!(f64, lapack::dggev);
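Aside (not part of the diff): the relations documented on `GeneralizedEigen` above are, in the usual notation,

```latex
\det(A - \lambda B) = 0, \qquad
A v_j = \lambda_j B v_j, \qquad
u_j^{\mathsf H} A = \lambda_j\, u_j^{\mathsf H} B .
```

A minimal usage sketch of the new API follows; the method names come from this file, while the concrete 4x4 input is arbitrary illustrative data (with `B = I` the generalized eigenvalues reduce to the eigenvalues of `A`).

```rust
use nalgebra::Matrix4;
use nalgebra_lapack::GeneralizedEigen;

fn main() {
    let a = Matrix4::new(
        1.0, 2.0, 0.0, 0.0,
        0.0, 1.0, 0.0, 0.0,
        0.0, 0.0, 3.0, 0.0,
        0.0, 0.0, 0.0, 4.0,
    );
    let b = Matrix4::<f64>::identity();

    // Panicking constructor; `try_new` returns `None` when LAPACK does not converge.
    let ge = GeneralizedEigen::new(a, b);

    // Raw (alpha, beta) pairs straight from LAPACK: lambda_j = alpha_j / beta_j
    // whenever beta_j != 0.
    for (alpha, beta) in ge.raw_eigenvalues().iter() {
        println!("alpha = {}, beta = {}", alpha, beta);
    }

    // Left and right eigenvectors, assembled into complex matrices.
    let (left, right) = ge.eigenvectors();
    println!("first right eigenvector: {}", right.column(0));
    let _ = left;
}
```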
@@ -83,9 +83,11 @@ mod lapack_check;

mod cholesky;
mod eigen;
+mod generalized_eigenvalues;
mod hessenberg;
mod lu;
mod qr;
+mod qz;
mod schur;
mod svd;
mod symmetric_eigen;
@@ -94,9 +96,11 @@ use num_complex::Complex;

pub use self::cholesky::{Cholesky, CholeskyScalar};
pub use self::eigen::Eigen;
+pub use self::generalized_eigenvalues::GeneralizedEigen;
pub use self::hessenberg::Hessenberg;
pub use self::lu::{LUScalar, LU};
pub use self::qr::QR;
+pub use self::qz::QZ;
pub use self::schur::Schur;
pub use self::svd::SVD;
pub use self::symmetric_eigen::SymmetricEigen;
nalgebra-lapack/src/qz.rs (new file, 321 lines)
@@ -0,0 +1,321 @@
#[cfg(feature = "serde-serialize")]
use serde::{Deserialize, Serialize};

use num::Zero;
use num_complex::Complex;

use simba::scalar::RealField;

use crate::ComplexHelper;
use na::allocator::Allocator;
use na::dimension::{Const, Dim};
use na::{DefaultAllocator, Matrix, OMatrix, OVector, Scalar};

use lapack;

/// QZ decomposition of a pair of N*N square matrices.
///
/// Retrieves the left and right matrices of Schur vectors (VSL and VSR),
/// the upper-quasitriangular matrix `S` and upper triangular matrix `T` such that the
/// decomposed input matrix `a` equals `VSL * S * VSR.transpose()` and
/// decomposed input matrix `b` equals `VSL * T * VSR.transpose()`.
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
#[cfg_attr(
    feature = "serde-serialize",
    serde(
        bound(serialize = "DefaultAllocator: Allocator<T, D, D> + Allocator<T, D>,
           OVector<T, D>: Serialize,
           OMatrix<T, D, D>: Serialize")
    )
)]
#[cfg_attr(
    feature = "serde-serialize",
    serde(
        bound(deserialize = "DefaultAllocator: Allocator<T, D, D> + Allocator<T, D>,
           OVector<T, D>: Deserialize<'de>,
           OMatrix<T, D, D>: Deserialize<'de>")
    )
)]
#[derive(Clone, Debug)]
pub struct QZ<T: Scalar, D: Dim>
where
    DefaultAllocator: Allocator<T, D> + Allocator<T, D, D>,
{
    alphar: OVector<T, D>,
    alphai: OVector<T, D>,
    beta: OVector<T, D>,
    vsl: OMatrix<T, D, D>,
    s: OMatrix<T, D, D>,
    vsr: OMatrix<T, D, D>,
    t: OMatrix<T, D, D>,
}

impl<T: Scalar + Copy, D: Dim> Copy for QZ<T, D>
where
    DefaultAllocator: Allocator<T, D, D> + Allocator<T, D>,
    OMatrix<T, D, D>: Copy,
    OVector<T, D>: Copy,
{
}

impl<T: QZScalar + RealField, D: Dim> QZ<T, D>
where
    DefaultAllocator: Allocator<T, D, D> + Allocator<T, D>,
{
    /// Attempts to compute the QZ decomposition of input real square matrices `a` and `b`.
    ///
    /// I.e. retrieves the left and right matrices of Schur vectors (VSL and VSR),
    /// the upper-quasitriangular matrix `S` and upper triangular matrix `T` such that the
    /// decomposed matrix `a` equals `VSL * S * VSR.transpose()` and
    /// decomposed matrix `b` equals `VSL * T * VSR.transpose()`.
    ///
    /// Panics if the method did not converge.
    pub fn new(a: OMatrix<T, D, D>, b: OMatrix<T, D, D>) -> Self {
        Self::try_new(a, b).expect("QZ decomposition: convergence failed.")
    }

    /// Computes the decomposition of input matrices `a` and `b` into a pair of matrices of Schur vectors,
    /// a quasi-upper triangular matrix and an upper-triangular matrix.
    ///
    /// Returns `None` if the method did not converge.
    pub fn try_new(mut a: OMatrix<T, D, D>, mut b: OMatrix<T, D, D>) -> Option<Self> {
        assert!(
            a.is_square() && b.is_square(),
            "Unable to compute the qz decomposition of non-square matrices."
        );

        assert!(
            a.shape_generic() == b.shape_generic(),
            "Unable to compute the qz decomposition of two square matrices of different dimensions."
        );

        let (nrows, ncols) = a.shape_generic();
        let n = nrows.value();

        let mut info = 0;

        let mut alphar = Matrix::zeros_generic(nrows, Const::<1>);
        let mut alphai = Matrix::zeros_generic(nrows, Const::<1>);
        let mut beta = Matrix::zeros_generic(nrows, Const::<1>);
        let mut vsl = Matrix::zeros_generic(nrows, ncols);
        let mut vsr = Matrix::zeros_generic(nrows, ncols);
        // Placeholders:
        let mut bwork = [0i32];
        let mut unused = 0;

        let lwork = T::xgges_work_size(
            b'V',
            b'V',
            b'N',
            n as i32,
            a.as_mut_slice(),
            n as i32,
            b.as_mut_slice(),
            n as i32,
            &mut unused,
            alphar.as_mut_slice(),
            alphai.as_mut_slice(),
            beta.as_mut_slice(),
            vsl.as_mut_slice(),
            n as i32,
            vsr.as_mut_slice(),
            n as i32,
            &mut bwork,
            &mut info,
        );
        lapack_check!(info);

        let mut work = vec![T::zero(); lwork as usize];

        T::xgges(
            b'V',
            b'V',
            b'N',
            n as i32,
            a.as_mut_slice(),
            n as i32,
            b.as_mut_slice(),
            n as i32,
            &mut unused,
            alphar.as_mut_slice(),
            alphai.as_mut_slice(),
            beta.as_mut_slice(),
            vsl.as_mut_slice(),
            n as i32,
            vsr.as_mut_slice(),
            n as i32,
            &mut work,
            lwork,
            &mut bwork,
            &mut info,
        );
        lapack_check!(info);

        Some(QZ {
            alphar,
            alphai,
            beta,
            vsl,
            s: a,
            vsr,
            t: b,
        })
    }

    /// Retrieves the left and right matrices of Schur vectors (VSL and VSR),
    /// the upper-quasitriangular matrix `S` and upper triangular matrix `T` such that the
    /// decomposed input matrix `a` equals `VSL * S * VSR.transpose()` and
    /// decomposed input matrix `b` equals `VSL * T * VSR.transpose()`.
    pub fn unpack(
        self,
    ) -> (
        OMatrix<T, D, D>,
        OMatrix<T, D, D>,
        OMatrix<T, D, D>,
        OMatrix<T, D, D>,
    ) {
        (self.vsl, self.s, self.t, self.vsr)
    }

    /// Outputs the unprocessed (almost) version of generalized eigenvalues ((alphar, alphai), beta)
    /// straight from LAPACK.
    #[must_use]
    pub fn raw_eigenvalues(&self) -> OVector<(Complex<T>, T), D>
    where
        DefaultAllocator: Allocator<(Complex<T>, T), D>,
    {
        let mut out = Matrix::from_element_generic(
            self.vsl.shape_generic().0,
            Const::<1>,
            (Complex::zero(), T::RealField::zero()),
        );

        for i in 0..out.len() {
            out[i] = (
                Complex::new(self.alphar[i].clone(), self.alphai[i].clone()),
                self.beta[i].clone(),
            )
        }

        out
    }
}

/*
 *
 * Lapack functions dispatch.
 *
 */
/// Trait implemented by scalars for which Lapack implements the RealField QZ decomposition.
pub trait QZScalar: Scalar {
    #[allow(missing_docs)]
    fn xgges(
        jobvsl: u8,
        jobvsr: u8,
        sort: u8,
        // select: ???
        n: i32,
        a: &mut [Self],
        lda: i32,
        b: &mut [Self],
        ldb: i32,
        sdim: &mut i32,
        alphar: &mut [Self],
        alphai: &mut [Self],
        beta: &mut [Self],
        vsl: &mut [Self],
        ldvsl: i32,
        vsr: &mut [Self],
        ldvsr: i32,
        work: &mut [Self],
        lwork: i32,
        bwork: &mut [i32],
        info: &mut i32,
    );

    #[allow(missing_docs)]
    fn xgges_work_size(
        jobvsl: u8,
        jobvsr: u8,
        sort: u8,
        // select: ???
        n: i32,
        a: &mut [Self],
        lda: i32,
        b: &mut [Self],
        ldb: i32,
        sdim: &mut i32,
        alphar: &mut [Self],
        alphai: &mut [Self],
        beta: &mut [Self],
        vsl: &mut [Self],
        ldvsl: i32,
        vsr: &mut [Self],
        ldvsr: i32,
        bwork: &mut [i32],
        info: &mut i32,
    ) -> i32;
}

macro_rules! qz_scalar_impl (
    ($N: ty, $xgges: path) => (
        impl QZScalar for $N {
            #[inline]
            fn xgges(jobvsl: u8,
                     jobvsr: u8,
                     sort: u8,
                     // select: ???
                     n: i32,
                     a: &mut [$N],
                     lda: i32,
                     b: &mut [$N],
                     ldb: i32,
                     sdim: &mut i32,
                     alphar: &mut [$N],
                     alphai: &mut [$N],
                     beta : &mut [$N],
                     vsl: &mut [$N],
                     ldvsl: i32,
                     vsr: &mut [$N],
                     ldvsr: i32,
                     work: &mut [$N],
                     lwork: i32,
                     bwork: &mut [i32],
                     info: &mut i32) {
                unsafe { $xgges(jobvsl, jobvsr, sort, None, n, a, lda, b, ldb, sdim, alphar, alphai, beta, vsl, ldvsl, vsr, ldvsr, work, lwork, bwork, info); }
            }

            #[inline]
            fn xgges_work_size(jobvsl: u8,
                               jobvsr: u8,
                               sort: u8,
                               // select: ???
                               n: i32,
                               a: &mut [$N],
                               lda: i32,
                               b: &mut [$N],
                               ldb: i32,
                               sdim: &mut i32,
                               alphar: &mut [$N],
                               alphai: &mut [$N],
                               beta : &mut [$N],
                               vsl: &mut [$N],
                               ldvsl: i32,
                               vsr: &mut [$N],
                               ldvsr: i32,
                               bwork: &mut [i32],
                               info: &mut i32)
                               -> i32 {
                let mut work = [ Zero::zero() ];
                let lwork = -1 as i32;

                unsafe { $xgges(jobvsl, jobvsr, sort, None, n, a, lda, b, ldb, sdim, alphar, alphai, beta, vsl, ldvsl, vsr, ldvsr, &mut work, lwork, bwork, info); }
                ComplexHelper::real_part(work[0]) as i32
            }
        }
    )
);

qz_scalar_impl!(f32, lapack::sgges);
qz_scalar_impl!(f64, lapack::dgges);
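Aside (not part of the diff): the QZ factorization above, with `Q = VSL` and `Z = VSR` orthogonal, `S` quasi-upper-triangular and `T` upper triangular, is

```latex
A = Q\, S\, Z^{\mathsf T}, \qquad B = Q\, T\, Z^{\mathsf T}.
```

A minimal usage sketch (method names from this file, arbitrary illustrative input, tolerances mirroring the tests added in this commit):

```rust
use nalgebra::Matrix4;
use nalgebra_lapack::QZ;

fn main() {
    let a = Matrix4::new(
        2.0, 1.0, 0.0, 0.0,
        0.0, 2.0, 1.0, 0.0,
        0.0, 0.0, 2.0, 1.0,
        0.0, 0.0, 0.0, 2.0,
    );
    let b = Matrix4::<f64>::identity();

    let qz = QZ::new(a, b);

    // (alpha, beta) pairs; the generalized eigenvalues are alpha / beta.
    let raw = qz.raw_eigenvalues();

    // unpack() yields (VSL, S, T, VSR) with a ~= VSL * S * VSR^T and b ~= VSL * T * VSR^T.
    let (vsl, s, t, vsr) = qz.unpack();
    assert!(((&vsl * &s) * vsr.transpose() - a).norm() < 1.0e-7);
    assert!(((&vsl * &t) * vsr.transpose() - b).norm() < 1.0e-7);
    let _ = raw;
}
```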
nalgebra-lapack/tests/linalg/generalized_eigenvalues.rs (new file, 72 lines)
@@ -0,0 +1,72 @@
use na::dimension::Const;
use na::{DMatrix, OMatrix};
use nl::GeneralizedEigen;
use num_complex::Complex;
use simba::scalar::ComplexField;

use crate::proptest::*;
use proptest::{prop_assert, prop_compose, proptest};

prop_compose! {
    fn f64_dynamic_dim_squares()
        (n in PROPTEST_MATRIX_DIM)
        (a in matrix(PROPTEST_F64,n,n), b in matrix(PROPTEST_F64,n,n)) -> (DMatrix<f64>, DMatrix<f64>){
        (a,b)
    }
}

proptest! {
    #[test]
    fn ge((a,b) in f64_dynamic_dim_squares()){

        let a_c = a.clone().map(|x| Complex::new(x, 0.0));
        let b_c = b.clone().map(|x| Complex::new(x, 0.0));
        let n = a.shape_generic().0;

        let ge = GeneralizedEigen::new(a.clone(), b.clone());
        let (vsl,vsr) = ge.clone().eigenvectors();

        for (i,(alpha,beta)) in ge.raw_eigenvalues().iter().enumerate() {
            let l_a = a_c.clone() * Complex::new(*beta, 0.0);
            let l_b = b_c.clone() * *alpha;

            prop_assert!(
                relative_eq!(
                    ((&l_a - &l_b)*vsr.column(i)).map(|x| x.modulus()),
                    OMatrix::zeros_generic(n, Const::<1>),
                    epsilon = 1.0e-5));

            prop_assert!(
                relative_eq!(
                    (vsl.column(i).adjoint()*(&l_a - &l_b)).map(|x| x.modulus()),
                    OMatrix::zeros_generic(Const::<1>, n),
                    epsilon = 1.0e-5))
        };
    }

    #[test]
    fn ge_static(a in matrix4(), b in matrix4()) {

        let ge = GeneralizedEigen::new(a.clone(), b.clone());
        let a_c = a.clone().map(|x| Complex::new(x, 0.0));
        let b_c = b.clone().map(|x| Complex::new(x, 0.0));
        let (vsl,vsr) = ge.eigenvectors();
        let eigenvalues = ge.raw_eigenvalues();

        for (i,(alpha,beta)) in eigenvalues.iter().enumerate() {
            let l_a = a_c.clone() * Complex::new(*beta, 0.0);
            let l_b = b_c.clone() * *alpha;

            prop_assert!(
                relative_eq!(
                    ((&l_a - &l_b)*vsr.column(i)).map(|x| x.modulus()),
                    OMatrix::zeros_generic(Const::<4>, Const::<1>),
                    epsilon = 1.0e-5));
            prop_assert!(
                relative_eq!((vsl.column(i).adjoint()*(&l_a - &l_b)).map(|x| x.modulus()),
                    OMatrix::zeros_generic(Const::<1>, Const::<4>),
                    epsilon = 1.0e-5))
        }
    }
}
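Aside (not part of the diff): written out, the residuals the `ge` and `ge_static` tests above drive to zero are

```latex
(\beta_j A - \alpha_j B)\, v_j \approx 0,
\qquad
u_j^{\mathsf H} (\beta_j A - \alpha_j B) \approx 0,
```

which is the pencil form of $A v_j = \lambda_j B v_j$ with $\lambda_j = \alpha_j / \beta_j$, and stays well defined even when $\beta_j = 0$ (an infinite generalized eigenvalue).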
@@ -1,6 +1,8 @@
mod cholesky;
+mod generalized_eigenvalues;
mod lu;
mod qr;
+mod qz;
mod real_eigensystem;
mod schur;
mod svd;
nalgebra-lapack/tests/linalg/qz.rs (new file, 34 lines)
@@ -0,0 +1,34 @@
use na::DMatrix;
use nl::QZ;

use crate::proptest::*;
use proptest::{prop_assert, prop_compose, proptest};

prop_compose! {
    fn f64_dynamic_dim_squares()
        (n in PROPTEST_MATRIX_DIM)
        (a in matrix(PROPTEST_F64,n,n), b in matrix(PROPTEST_F64,n,n)) -> (DMatrix<f64>, DMatrix<f64>){
        (a,b)
    }
}

proptest! {
    #[test]
    fn qz((a,b) in f64_dynamic_dim_squares()) {

        let qz = QZ::new(a.clone(), b.clone());
        let (vsl,s,t,vsr) = qz.clone().unpack();

        prop_assert!(relative_eq!(&vsl * s * vsr.transpose(), a, epsilon = 1.0e-7));
        prop_assert!(relative_eq!(vsl * t * vsr.transpose(), b, epsilon = 1.0e-7));
    }

    #[test]
    fn qz_static(a in matrix4(), b in matrix4()) {
        let qz = QZ::new(a.clone(), b.clone());
        let (vsl,s,t,vsr) = qz.unpack();

        prop_assert!(relative_eq!(&vsl * s * vsr.transpose(), a, epsilon = 1.0e-7));
        prop_assert!(relative_eq!(vsl * t * vsr.transpose(), b, epsilon = 1.0e-7));
    }
}
@@ -21,5 +21,5 @@ quote = "1.0"
proc-macro2 = "1.0"

[dev-dependencies]
-nalgebra = { version = "0.30.0", path = ".." }
+nalgebra = { version = "0.31.0", path = ".." }
trybuild = "1.0.42"
@@ -1,6 +1,6 @@
[package]
name = "nalgebra-sparse"
-version = "0.6.0"
+version = "0.7.0"
authors = [ "Andreas Longva", "Sébastien Crozet <developer@crozet.re>" ]
edition = "2018"
description = "Sparse matrix computation based on nalgebra."
@@ -24,7 +24,7 @@ io = [ "pest", "pest_derive" ]
slow-tests = []

[dependencies]
-nalgebra = { version="0.30", path = "../" }
+nalgebra = { version="0.31", path = "../" }
num-traits = { version = "0.2", default-features = false }
proptest = { version = "1.0", optional = true }
matrixcompare-core = { version = "0.1.0", optional = true }
@@ -35,8 +35,8 @@ serde = { version = "1.0", default-features = false, features = [ "derive" ], op
[dev-dependencies]
itertools = "0.10"
matrixcompare = { version = "0.3.0", features = [ "proptest-support" ] }
-nalgebra = { version="0.30", path = "../", features = ["compare"] }
+nalgebra = { version="0.31", path = "../", features = ["compare"] }
-tempfile = "3"
+tempfile = "3.3"
serde_json = "1.0"

[package.metadata.docs.rs]
@@ -3,7 +3,7 @@ use crate::csr::CsrMatrix;

use crate::ops::serial::{
    spadd_csc_prealloc, spadd_csr_prealloc, spadd_pattern, spmm_csc_dense, spmm_csc_pattern,
-    spmm_csc_prealloc, spmm_csr_dense, spmm_csr_pattern, spmm_csr_prealloc,
+    spmm_csc_prealloc_unchecked, spmm_csr_dense, spmm_csr_pattern, spmm_csr_prealloc_unchecked,
};
use crate::ops::Op;
use nalgebra::allocator::Allocator;
@@ -112,9 +112,9 @@ macro_rules! impl_spmm {
    }
}

-impl_spmm!(CsrMatrix, spmm_csr_pattern, spmm_csr_prealloc);
+impl_spmm!(CsrMatrix, spmm_csr_pattern, spmm_csr_prealloc_unchecked);
// Need to switch order of operations for CSC pattern
-impl_spmm!(CscMatrix, spmm_csc_pattern, spmm_csc_prealloc);
+impl_spmm!(CscMatrix, spmm_csc_pattern, spmm_csc_prealloc_unchecked);

/// Implements Scalar * Matrix operations for *concrete* scalar types. The reason this is necessary
/// is that we are not able to implement Mul<Matrix<T>> for all T generically due to orphan rules.
@@ -20,6 +20,51 @@ fn spmm_cs_unexpected_entry() -> OperationError {
/// reversed (since transpose(AB) = transpose(B) * transpose(A) and CSC(A) = transpose(CSR(A)).
///
/// We assume here that the matrices have already been verified to be dimensionally compatible.
+pub fn spmm_cs_prealloc_unchecked<T>(
+    beta: T,
+    c: &mut CsMatrix<T>,
+    alpha: T,
+    a: &CsMatrix<T>,
+    b: &CsMatrix<T>,
+) -> Result<(), OperationError>
+where
+    T: Scalar + ClosedAdd + ClosedMul + Zero + One,
+{
+    assert_eq!(c.pattern().major_dim(), a.pattern().major_dim());
+    assert_eq!(c.pattern().minor_dim(), b.pattern().minor_dim());
+    let some_val = Zero::zero();
+    let mut scratchpad_values: Vec<T> = vec![some_val; b.pattern().minor_dim()];
+    for i in 0..c.pattern().major_dim() {
+        let a_lane_i = a.get_lane(i).unwrap();
+
+        let mut c_lane_i = c.get_lane_mut(i).unwrap();
+
+        for (&k, a_ik) in a_lane_i.minor_indices().iter().zip(a_lane_i.values()) {
+            let b_lane_k = b.get_lane(k).unwrap();
+            let alpha_aik = alpha.clone() * a_ik.clone();
+            for (j, b_kj) in b_lane_k.minor_indices().iter().zip(b_lane_k.values()) {
+                // use a dense scatter vector to accumulate non-zeros quickly
+                unsafe {
+                    *scratchpad_values.get_unchecked_mut(*j) += alpha_aik.clone() * b_kj.clone();
+                }
+            }
+        }
+
+        // Get indices from C pattern and gather from the dense scratchpad_values
+        let (indices, values) = c_lane_i.indices_and_values_mut();
+        values
+            .iter_mut()
+            .zip(indices)
+            .for_each(|(output_ref, index)| unsafe {
+                *output_ref = beta.clone() * output_ref.clone()
+                    + scratchpad_values.get_unchecked(*index).clone();
+                *scratchpad_values.get_unchecked_mut(*index) = Zero::zero();
+            });
+    }
+
+    Ok(())
+}
+
pub fn spmm_cs_prealloc<T>(
    beta: T,
    c: &mut CsMatrix<T>,
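Aside (not part of the diff): the scatter/gather trick used by `spmm_cs_prealloc_unchecked` above can be illustrated on plain slices. This is a standalone sketch with made-up helper names, not nalgebra-sparse API: one output row of `A * B` is accumulated into a dense buffer indexed by column, then gathered back at exactly the positions kept by `C`'s sparsity pattern.

```rust
// One CSR row of A times CSR matrix B, restricted to the output row's pattern.
fn row_times_matrix(
    a_row_cols: &[usize], a_row_vals: &[f64],                    // row i of A
    b_row_offsets: &[usize], b_cols: &[usize], b_vals: &[f64],   // CSR arrays of B
    c_row_cols: &[usize],                                        // pattern of row i of C
    ncols: usize,
) -> Vec<f64> {
    // Dense scatter buffer, one slot per output column.
    let mut scratch = vec![0.0; ncols];
    for (&k, &a_ik) in a_row_cols.iter().zip(a_row_vals) {
        // Scatter alpha-free contributions of row k of B, scaled by a_ik.
        for idx in b_row_offsets[k]..b_row_offsets[k + 1] {
            scratch[b_cols[idx]] += a_ik * b_vals[idx];
        }
    }
    // Gather only the entries C's pattern keeps, resetting the buffer for reuse.
    let mut out = Vec::with_capacity(c_row_cols.len());
    for &j in c_row_cols {
        out.push(scratch[j]);
        scratch[j] = 0.0;
    }
    out
}
```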
@@ -1,5 +1,7 @@
use crate::csc::CscMatrix;
-use crate::ops::serial::cs::{spadd_cs_prealloc, spmm_cs_dense, spmm_cs_prealloc};
+use crate::ops::serial::cs::{
+    spadd_cs_prealloc, spmm_cs_dense, spmm_cs_prealloc, spmm_cs_prealloc_unchecked,
+};
use crate::ops::serial::{OperationError, OperationErrorKind};
use crate::ops::Op;
use nalgebra::{ClosedAdd, ClosedMul, DMatrixSlice, DMatrixSliceMut, RealField, Scalar};
@@ -83,35 +85,81 @@ where
{
    assert_compatible_spmm_dims!(c, a, b);

-    use Op::{NoOp, Transpose};
+    use Op::NoOp;

    match (&a, &b) {
        (NoOp(ref a), NoOp(ref b)) => {
            // Note: We have to reverse the order for CSC matrices
            spmm_cs_prealloc(beta, &mut c.cs, alpha, &b.cs, &a.cs)
        }
-        _ => {
-            // Currently we handle transposition by explicitly precomputing transposed matrices
-            // and calling the operation again without transposition
-            let a_ref: &CscMatrix<T> = a.inner_ref();
-            let b_ref: &CscMatrix<T> = b.inner_ref();
-            let (a, b) = {
-                use Cow::*;
-                match (&a, &b) {
-                    (NoOp(_), NoOp(_)) => unreachable!(),
-                    (Transpose(ref a), NoOp(_)) => (Owned(a.transpose()), Borrowed(b_ref)),
-                    (NoOp(_), Transpose(ref b)) => (Borrowed(a_ref), Owned(b.transpose())),
-                    (Transpose(ref a), Transpose(ref b)) => {
-                        (Owned(a.transpose()), Owned(b.transpose()))
-                    }
-                }
-            };
-
-            spmm_csc_prealloc(beta, c, alpha, NoOp(a.as_ref()), NoOp(b.as_ref()))
-        }
+        _ => spmm_csc_transposed(beta, c, alpha, a, b, spmm_csc_prealloc),
    }
}

+/// Faster sparse-sparse matrix multiplication, `C <- beta * C + alpha * op(A) * op(B)`.
+/// This will not return an error even if the patterns don't match.
+/// Should be used for situations where pattern creation immediately precedes multiplication.
+///
+/// Panics if the dimensions of the matrices involved are not compatible with the expression.
+pub fn spmm_csc_prealloc_unchecked<T>(
+    beta: T,
+    c: &mut CscMatrix<T>,
+    alpha: T,
+    a: Op<&CscMatrix<T>>,
+    b: Op<&CscMatrix<T>>,
+) -> Result<(), OperationError>
+where
+    T: Scalar + ClosedAdd + ClosedMul + Zero + One,
+{
+    assert_compatible_spmm_dims!(c, a, b);
+
+    use Op::NoOp;
+
+    match (&a, &b) {
+        (NoOp(ref a), NoOp(ref b)) => {
+            // Note: We have to reverse the order for CSC matrices
+            spmm_cs_prealloc_unchecked(beta, &mut c.cs, alpha, &b.cs, &a.cs)
+        }
+        _ => spmm_csc_transposed(beta, c, alpha, a, b, spmm_csc_prealloc_unchecked),
+    }
+}
+
+fn spmm_csc_transposed<T, F>(
+    beta: T,
+    c: &mut CscMatrix<T>,
+    alpha: T,
+    a: Op<&CscMatrix<T>>,
+    b: Op<&CscMatrix<T>>,
+    spmm_kernel: F,
+) -> Result<(), OperationError>
+where
+    T: Scalar + ClosedAdd + ClosedMul + Zero + One,
+    F: Fn(
+        T,
+        &mut CscMatrix<T>,
+        T,
+        Op<&CscMatrix<T>>,
+        Op<&CscMatrix<T>>,
+    ) -> Result<(), OperationError>,
+{
+    use Op::{NoOp, Transpose};
+
+    // Currently we handle transposition by explicitly precomputing transposed matrices
+    // and calling the operation again without transposition
+    let a_ref: &CscMatrix<T> = a.inner_ref();
+    let b_ref: &CscMatrix<T> = b.inner_ref();
+    let (a, b) = {
+        use Cow::*;
+        match (&a, &b) {
+            (NoOp(_), NoOp(_)) => unreachable!(),
+            (Transpose(ref a), NoOp(_)) => (Owned(a.transpose()), Borrowed(b_ref)),
+            (NoOp(_), Transpose(ref b)) => (Borrowed(a_ref), Owned(b.transpose())),
+            (Transpose(ref a), Transpose(ref b)) => (Owned(a.transpose()), Owned(b.transpose())),
+        }
+    };
+    spmm_kernel(beta, c, alpha, NoOp(a.as_ref()), NoOp(b.as_ref()))
+}
+
/// Solve the lower triangular system `op(L) X = B`.
///
/// Only the lower triangular part of L is read, and the result is stored in B.
@@ -1,5 +1,7 @@
use crate::csr::CsrMatrix;
-use crate::ops::serial::cs::{spadd_cs_prealloc, spmm_cs_dense, spmm_cs_prealloc};
+use crate::ops::serial::cs::{
+    spadd_cs_prealloc, spmm_cs_dense, spmm_cs_prealloc, spmm_cs_prealloc_unchecked,
+};
use crate::ops::serial::OperationError;
use crate::ops::Op;
use nalgebra::{ClosedAdd, ClosedMul, DMatrixSlice, DMatrixSliceMut, Scalar};
@@ -77,30 +79,73 @@ where
{
    assert_compatible_spmm_dims!(c, a, b);

-    use Op::{NoOp, Transpose};
+    use Op::NoOp;

    match (&a, &b) {
        (NoOp(ref a), NoOp(ref b)) => spmm_cs_prealloc(beta, &mut c.cs, alpha, &a.cs, &b.cs),
-        _ => {
-            // Currently we handle transposition by explicitly precomputing transposed matrices
-            // and calling the operation again without transposition
-            // TODO: At least use workspaces to allow control of allocations. Maybe
-            // consider implementing certain patterns (like A^T * B) explicitly
-            let a_ref: &CsrMatrix<T> = a.inner_ref();
-            let b_ref: &CsrMatrix<T> = b.inner_ref();
-            let (a, b) = {
-                use Cow::*;
-                match (&a, &b) {
-                    (NoOp(_), NoOp(_)) => unreachable!(),
-                    (Transpose(ref a), NoOp(_)) => (Owned(a.transpose()), Borrowed(b_ref)),
-                    (NoOp(_), Transpose(ref b)) => (Borrowed(a_ref), Owned(b.transpose())),
-                    (Transpose(ref a), Transpose(ref b)) => {
-                        (Owned(a.transpose()), Owned(b.transpose()))
-                    }
-                }
-            };
-
-            spmm_csr_prealloc(beta, c, alpha, NoOp(a.as_ref()), NoOp(b.as_ref()))
-        }
+        _ => spmm_csr_transposed(beta, c, alpha, a, b, spmm_csr_prealloc),
    }
}

+/// Faster sparse-sparse matrix multiplication, `C <- beta * C + alpha * op(A) * op(B)`.
+/// This will not return an error even if the patterns don't match.
+/// Should be used for situations where pattern creation immediately precedes multiplication.
+///
+/// Panics if the dimensions of the matrices involved are not compatible with the expression.
+pub fn spmm_csr_prealloc_unchecked<T>(
+    beta: T,
+    c: &mut CsrMatrix<T>,
+    alpha: T,
+    a: Op<&CsrMatrix<T>>,
+    b: Op<&CsrMatrix<T>>,
+) -> Result<(), OperationError>
+where
+    T: Scalar + ClosedAdd + ClosedMul + Zero + One,
+{
+    assert_compatible_spmm_dims!(c, a, b);
+
+    use Op::NoOp;
+
+    match (&a, &b) {
+        (NoOp(ref a), NoOp(ref b)) => {
+            spmm_cs_prealloc_unchecked(beta, &mut c.cs, alpha, &a.cs, &b.cs)
+        }
+        _ => spmm_csr_transposed(beta, c, alpha, a, b, spmm_csr_prealloc_unchecked),
+    }
+}
+
+fn spmm_csr_transposed<T, F>(
+    beta: T,
+    c: &mut CsrMatrix<T>,
+    alpha: T,
+    a: Op<&CsrMatrix<T>>,
+    b: Op<&CsrMatrix<T>>,
+    spmm_kernel: F,
+) -> Result<(), OperationError>
+where
+    T: Scalar + ClosedAdd + ClosedMul + Zero + One,
+    F: Fn(
+        T,
+        &mut CsrMatrix<T>,
+        T,
+        Op<&CsrMatrix<T>>,
+        Op<&CsrMatrix<T>>,
+    ) -> Result<(), OperationError>,
+{
+    use Op::{NoOp, Transpose};
+
+    // Currently we handle transposition by explicitly precomputing transposed matrices
+    // and calling the operation again without transposition
+    let a_ref: &CsrMatrix<T> = a.inner_ref();
+    let b_ref: &CsrMatrix<T> = b.inner_ref();
+    let (a, b) = {
+        use Cow::*;
+        match (&a, &b) {
+            (NoOp(_), NoOp(_)) => unreachable!(),
+            (Transpose(ref a), NoOp(_)) => (Owned(a.transpose()), Borrowed(b_ref)),
+            (NoOp(_), Transpose(ref b)) => (Borrowed(a_ref), Owned(b.transpose())),
+            (Transpose(ref a), Transpose(ref b)) => (Owned(a.transpose()), Owned(b.transpose())),
+        }
+    };
+    spmm_kernel(beta, c, alpha, NoOp(a.as_ref()), NoOp(b.as_ref()))
+}
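Aside (not part of the diff): a minimal sketch of the intended call pattern for the new unchecked kernel — compute the product pattern first, allocate `C` over exactly that pattern, then multiply, so the "patterns match" precondition holds by construction. The free functions used are the ones imported by the tests below; `try_from_pattern_and_values` is existing nalgebra-sparse API and the input data is arbitrary.

```rust
use nalgebra_sparse::coo::CooMatrix;
use nalgebra_sparse::csr::CsrMatrix;
use nalgebra_sparse::ops::serial::{spmm_csr_pattern, spmm_csr_prealloc_unchecked};
use nalgebra_sparse::ops::Op;

fn main() {
    let mut coo_a = CooMatrix::<f64>::new(2, 3);
    coo_a.push(0, 0, 1.0);
    coo_a.push(1, 2, 2.0);
    let a = CsrMatrix::from(&coo_a);

    let mut coo_b = CooMatrix::<f64>::new(3, 2);
    coo_b.push(0, 1, 3.0);
    coo_b.push(2, 0, 4.0);
    let b = CsrMatrix::from(&coo_b);

    // Pattern of A * B, computed right before the multiplication.
    let pattern = spmm_csr_pattern(a.pattern(), b.pattern());
    let nnz = pattern.nnz();
    let mut c = CsrMatrix::try_from_pattern_and_values(pattern, vec![0.0; nnz]).unwrap();

    // C <- 0.0 * C + 1.0 * A * B
    spmm_csr_prealloc_unchecked(0.0, &mut c, 1.0, Op::NoOp(&a), Op::NoOp(&b)).unwrap();
}
```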
@@ -6,7 +6,8 @@ use nalgebra_sparse::csc::CscMatrix;
use nalgebra_sparse::csr::CsrMatrix;
use nalgebra_sparse::ops::serial::{
    spadd_csc_prealloc, spadd_csr_prealloc, spadd_pattern, spmm_csc_dense, spmm_csc_prealloc,
-    spmm_csr_dense, spmm_csr_pattern, spmm_csr_prealloc, spsolve_csc_lower_triangular,
+    spmm_csc_prealloc_unchecked, spmm_csr_dense, spmm_csr_pattern, spmm_csr_prealloc,
+    spmm_csr_prealloc_unchecked, spsolve_csc_lower_triangular,
};
use nalgebra_sparse::ops::Op;
use nalgebra_sparse::pattern::SparsityPattern;
@@ -543,6 +544,29 @@ proptest! {
        prop_assert_eq!(&c_pattern, c_csr.pattern());
    }

+    #[test]
+    fn spmm_csr_prealloc_unchecked_test(SpmmCsrArgs { c, beta, alpha, a, b }
+        in spmm_csr_prealloc_args_strategy()
+    ) {
+        // Test that we get the expected result by comparing to an equivalent dense operation
+        // (here we give in the C matrix, so the sparsity pattern is essentially fixed)
+        let mut c_sparse = c.clone();
+        spmm_csr_prealloc_unchecked(beta, &mut c_sparse, alpha, a.as_ref(), b.as_ref()).unwrap();
+
+        let mut c_dense = DMatrix::from(&c);
+        let op_a_dense = match a {
+            Op::NoOp(ref a) => DMatrix::from(a),
+            Op::Transpose(ref a) => DMatrix::from(a).transpose(),
+        };
+        let op_b_dense = match b {
+            Op::NoOp(ref b) => DMatrix::from(b),
+            Op::Transpose(ref b) => DMatrix::from(b).transpose(),
+        };
+        c_dense = beta * c_dense + alpha * &op_a_dense * op_b_dense;
+
+        prop_assert_eq!(&DMatrix::from(&c_sparse), &c_dense);
+    }
+
    #[test]
    fn spmm_csr_prealloc_test(SpmmCsrArgs { c, beta, alpha, a, b }
        in spmm_csr_prealloc_args_strategy()
|
|||||||
prop_assert_eq!(&DMatrix::from(&c_sparse), &c_dense);
|
prop_assert_eq!(&DMatrix::from(&c_sparse), &c_dense);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn spmm_csc_prealloc_unchecked_test(SpmmCscArgs { c, beta, alpha, a, b }
|
||||||
|
in spmm_csc_prealloc_args_strategy()
|
||||||
|
) {
|
||||||
|
// Test that we get the expected result by comparing to an equivalent dense operation
|
||||||
|
// (here we give in the C matrix, so the sparsity pattern is essentially fixed)
|
||||||
|
let mut c_sparse = c.clone();
|
||||||
|
spmm_csc_prealloc_unchecked(beta, &mut c_sparse, alpha, a.as_ref(), b.as_ref()).unwrap();
|
||||||
|
|
||||||
|
let mut c_dense = DMatrix::from(&c);
|
||||||
|
let op_a_dense = match a {
|
||||||
|
Op::NoOp(ref a) => DMatrix::from(a),
|
||||||
|
Op::Transpose(ref a) => DMatrix::from(a).transpose(),
|
||||||
|
};
|
||||||
|
let op_b_dense = match b {
|
||||||
|
Op::NoOp(ref b) => DMatrix::from(b),
|
||||||
|
Op::Transpose(ref b) => DMatrix::from(b).transpose(),
|
||||||
|
};
|
||||||
|
c_dense = beta * c_dense + alpha * &op_a_dense * op_b_dense;
|
||||||
|
|
||||||
|
prop_assert_eq!(&DMatrix::from(&c_sparse), &c_dense);
|
||||||
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn spmm_csc_prealloc_panics_on_dim_mismatch(
|
fn spmm_csc_prealloc_panics_on_dim_mismatch(
|
||||||
(alpha, beta, c, a, b)
|
(alpha, beta, c, a, b)
|
||||||
|
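
The two *_prealloc_unchecked tests above mirror the existing prealloc tests but call the new kernels that skip the per-row sparsity-pattern checks. A minimal sketch of calling the CSR variant outside the test harness, assuming the pattern of C already covers every entry of A * B (the helper name and the scalar values are illustrative, not from this commit):

    use nalgebra_sparse::csr::CsrMatrix;
    use nalgebra_sparse::ops::serial::spmm_csr_prealloc_unchecked;
    use nalgebra_sparse::ops::Op;

    /// Computes `c <- 1.0 * c + 2.0 * a * b`, assuming `c` was allocated with a
    /// sparsity pattern large enough to hold every entry of the product.
    fn accumulate_product(c: &mut CsrMatrix<f64>, a: &CsrMatrix<f64>, b: &CsrMatrix<f64>) {
        spmm_csr_prealloc_unchecked(1.0, c, 2.0, Op::NoOp(a), Op::NoOp(b))
            .expect("matching dimensions and a sufficient output pattern are assumed");
    }
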
@ -27,6 +27,11 @@ use std::mem;
/// A array-based statically sized matrix data storage.
#[repr(transparent)]
#[derive(Copy, Clone, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "rkyv-serialize", derive(bytecheck::CheckBytes))]
#[cfg_attr(
    feature = "rkyv-serialize-no-std",
    derive(rkyv::Archive, rkyv::Serialize, rkyv::Deserialize)
)]
#[cfg_attr(feature = "cuda", derive(cust_core::DeviceCopy))]
pub struct ArrayStorage<T, const R: usize, const C: usize>(pub [[T; R]; C]);

@ -273,45 +278,3 @@ unsafe impl<T: Scalar + Copy + bytemuck::Pod, const R: usize, const C: usize> by
    for ArrayStorage<T, R, C>
{
}

#[cfg(feature = "rkyv-serialize-no-std")]
mod rkyv_impl {
    use super::ArrayStorage;
    use rkyv::{offset_of, project_struct, Archive, Deserialize, Fallible, Serialize};

    impl<T: Archive, const R: usize, const C: usize> Archive for ArrayStorage<T, R, C> {
        type Archived = ArrayStorage<T::Archived, R, C>;
        type Resolver = <[[T; R]; C] as Archive>::Resolver;

        fn resolve(
            &self,
            pos: usize,
            resolver: Self::Resolver,
            out: &mut core::mem::MaybeUninit<Self::Archived>,
        ) {
            self.0.resolve(
                pos + offset_of!(Self::Archived, 0),
                resolver,
                project_struct!(out: Self::Archived => 0),
            );
        }
    }

    impl<T: Serialize<S>, S: Fallible + ?Sized, const R: usize, const C: usize> Serialize<S>
        for ArrayStorage<T, R, C>
    {
        fn serialize(&self, serializer: &mut S) -> Result<Self::Resolver, S::Error> {
            self.0.serialize(serializer)
        }
    }

    impl<T: Archive, D: Fallible + ?Sized, const R: usize, const C: usize>
        Deserialize<ArrayStorage<T, R, C>, D> for ArrayStorage<T::Archived, R, C>
    where
        T::Archived: Deserialize<T, D>,
    {
        fn deserialize(&self, deserializer: &mut D) -> Result<ArrayStorage<T, R, C>, D::Error> {
            Ok(ArrayStorage(self.0.deserialize(deserializer)?))
        }
    }
}
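
The same substitution repeats for every type touched below: the hand-rolled rkyv_impl modules are deleted and the types instead derive their rkyv traits behind the serialization features, with bytecheck::CheckBytes derived only under the std-enabled rkyv-serialize feature. A sketch of the resulting attribute stack on a hypothetical downstream type (MyStorage is not part of nalgebra):

    #[derive(Copy, Clone, PartialEq, Eq, Hash)]
    #[cfg_attr(feature = "rkyv-serialize", derive(bytecheck::CheckBytes))]
    #[cfg_attr(
        feature = "rkyv-serialize-no-std",
        derive(rkyv::Archive, rkyv::Serialize, rkyv::Deserialize)
    )]
    pub struct MyStorage<T, const N: usize>(pub [T; N]);
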
@ -13,6 +13,11 @@ use serde::{Deserialize, Deserializer, Serialize, Serializer};

/// Dim of dynamically-sized algebraic entities.
#[derive(Clone, Copy, Eq, PartialEq, Debug)]
#[cfg_attr(feature = "rkyv-serialize", derive(bytecheck::CheckBytes))]
#[cfg_attr(
    feature = "rkyv-serialize-no-std",
    derive(rkyv::Archive, rkyv::Serialize, rkyv::Deserialize)
)]
#[cfg_attr(feature = "cuda", derive(cust_core::DeviceCopy))]
pub struct Dynamic {
    value: usize,
@ -198,6 +203,11 @@ dim_ops!(
);

#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
#[cfg_attr(
    feature = "rkyv-serialize-no-std",
    derive(rkyv::Archive, rkyv::Serialize, rkyv::Deserialize)
)]
#[cfg_attr(feature = "rkyv-serialize", derive(bytecheck::CheckBytes))]
#[cfg_attr(feature = "cuda", derive(cust_core::DeviceCopy))]
pub struct Const<const R: usize>;

@ -233,37 +243,6 @@ impl<'de, const D: usize> Deserialize<'de> for Const<D> {
    }
}

#[cfg(feature = "rkyv-serialize-no-std")]
mod rkyv_impl {
    use super::Const;
    use rkyv::{Archive, Deserialize, Fallible, Serialize};

    impl<const R: usize> Archive for Const<R> {
        type Archived = Self;
        type Resolver = ();

        fn resolve(
            &self,
            _: usize,
            _: Self::Resolver,
            _: &mut core::mem::MaybeUninit<Self::Archived>,
        ) {
        }
    }

    impl<S: Fallible + ?Sized, const R: usize> Serialize<S> for Const<R> {
        fn serialize(&self, _: &mut S) -> Result<Self::Resolver, S::Error> {
            Ok(())
        }
    }

    impl<D: Fallible + ?Sized, const R: usize> Deserialize<Self, D> for Const<R> {
        fn deserialize(&self, _: &mut D) -> Result<Self, D::Error> {
            Ok(Const)
        }
    }
}

pub trait ToConst {
    type Const: DimName;
}
@ -150,6 +150,11 @@ pub type MatrixCross<T, R1, C1, R2, C2> =
/// some concrete types for `T` and a compatible data storage type `S`).
#[repr(C)]
#[derive(Clone, Copy)]
#[cfg_attr(feature = "rkyv-serialize", derive(bytecheck::CheckBytes))]
#[cfg_attr(
    feature = "rkyv-serialize-no-std",
    derive(rkyv::Archive, rkyv::Serialize, rkyv::Deserialize)
)]
#[cfg_attr(feature = "cuda", derive(cust_core::DeviceCopy))]
pub struct Matrix<T, R, C, S> {
    /// The data storage that contains all the matrix components. Disappointed?
@ -288,53 +293,6 @@ where
{
}

#[cfg(feature = "rkyv-serialize-no-std")]
mod rkyv_impl {
    use super::Matrix;
    use core::marker::PhantomData;
    use rkyv::{offset_of, project_struct, Archive, Deserialize, Fallible, Serialize};

    impl<T: Archive, R: Archive, C: Archive, S: Archive> Archive for Matrix<T, R, C, S> {
        type Archived = Matrix<T::Archived, R::Archived, C::Archived, S::Archived>;
        type Resolver = S::Resolver;

        fn resolve(
            &self,
            pos: usize,
            resolver: Self::Resolver,
            out: &mut core::mem::MaybeUninit<Self::Archived>,
        ) {
            self.data.resolve(
                pos + offset_of!(Self::Archived, data),
                resolver,
                project_struct!(out: Self::Archived => data),
            );
        }
    }

    impl<T: Archive, R: Archive, C: Archive, S: Serialize<_S>, _S: Fallible + ?Sized> Serialize<_S>
        for Matrix<T, R, C, S>
    {
        fn serialize(&self, serializer: &mut _S) -> Result<Self::Resolver, _S::Error> {
            self.data.serialize(serializer)
        }
    }

    impl<T: Archive, R: Archive, C: Archive, S: Archive, D: Fallible + ?Sized>
        Deserialize<Matrix<T, R, C, S>, D>
        for Matrix<T::Archived, R::Archived, C::Archived, S::Archived>
    where
        S::Archived: Deserialize<S, D>,
    {
        fn deserialize(&self, deserializer: &mut D) -> Result<Matrix<T, R, C, S>, D::Error> {
            Ok(Matrix {
                data: self.data.deserialize(deserializer)?,
                _phantoms: PhantomData,
            })
        }
    }
}

impl<T, R, C, S> Matrix<T, R, C, S> {
    /// Creates a new matrix with the given data without statically checking that the matrix
    /// dimension matches the storage dimension.
@ -21,6 +21,11 @@ use crate::{Dim, Matrix, OMatrix, RealField, Scalar, SimdComplexField, SimdRealF
/// in their documentation, read their dedicated pages directly.
#[repr(transparent)]
#[derive(Clone, Hash, Copy)]
#[cfg_attr(feature = "rkyv-serialize", derive(bytecheck::CheckBytes))]
#[cfg_attr(
    feature = "rkyv-serialize-no-std",
    derive(rkyv::Archive, rkyv::Serialize, rkyv::Deserialize)
)]
// #[cfg_attr(feature = "cuda", derive(cust_core::DeviceCopy))]
pub struct Unit<T> {
    pub(crate) value: T,
@ -58,47 +63,6 @@ impl<'de, T: Deserialize<'de>> Deserialize<'de> for Unit<T> {
    }
}

#[cfg(feature = "rkyv-serialize-no-std")]
mod rkyv_impl {
    use super::Unit;
    use rkyv::{offset_of, project_struct, Archive, Deserialize, Fallible, Serialize};

    impl<T: Archive> Archive for Unit<T> {
        type Archived = Unit<T::Archived>;
        type Resolver = T::Resolver;

        fn resolve(
            &self,
            pos: usize,
            resolver: Self::Resolver,
            out: &mut ::core::mem::MaybeUninit<Self::Archived>,
        ) {
            self.value.resolve(
                pos + offset_of!(Self::Archived, value),
                resolver,
                project_struct!(out: Self::Archived => value),
            );
        }
    }

    impl<T: Serialize<S>, S: Fallible + ?Sized> Serialize<S> for Unit<T> {
        fn serialize(&self, serializer: &mut S) -> Result<Self::Resolver, S::Error> {
            self.value.serialize(serializer)
        }
    }

    impl<T: Archive, D: Fallible + ?Sized> Deserialize<Unit<T>, D> for Unit<T::Archived>
    where
        T::Archived: Deserialize<T, D>,
    {
        fn deserialize(&self, deserializer: &mut D) -> Result<Unit<T>, D::Error> {
            Ok(Unit {
                value: self.value.deserialize(deserializer)?,
            })
        }
    }
}

#[cfg(feature = "cuda")]
unsafe impl<T: cust_core::DeviceCopy, R, C, S> cust_core::DeviceCopy for Unit<Matrix<T, R, C, S>>
where
@ -40,6 +40,11 @@ use simba::scalar::{ClosedNeg, RealField};
/// See <https://github.com/dimforge/nalgebra/issues/487>
#[repr(C)]
#[derive(Debug, Copy, Clone)]
#[cfg_attr(feature = "rkyv-serialize", derive(bytecheck::CheckBytes))]
#[cfg_attr(
    feature = "rkyv-serialize-no-std",
    derive(rkyv::Archive, rkyv::Serialize, rkyv::Deserialize)
)]
#[cfg_attr(feature = "cuda", derive(cust_core::DeviceCopy))]
pub struct DualQuaternion<T> {
    /// The real component of the quaternion
@ -66,6 +66,11 @@ use crate::geometry::{AbstractRotation, Point, Translation};
        Owned<T, Const<D>>: Deserialize<'de>,
        T: Scalar"))
)]
#[cfg_attr(
    feature = "rkyv-serialize-no-std",
    derive(rkyv::Archive, rkyv::Serialize, rkyv::Deserialize)
)]
#[cfg_attr(feature = "rkyv-serialize", derive(bytecheck::CheckBytes))]
pub struct Isometry<T, R, const D: usize> {
    /// The pure rotational part of this isometry.
    pub rotation: R,
@ -73,66 +78,6 @@ pub struct Isometry<T, R, const D: usize> {
    pub translation: Translation<T, D>,
}

#[cfg(feature = "rkyv-serialize-no-std")]
mod rkyv_impl {
    use super::Isometry;
    use crate::{base::Scalar, geometry::Translation};
    use rkyv::{offset_of, project_struct, Archive, Deserialize, Fallible, Serialize};

    impl<T: Scalar + Archive, R: Archive, const D: usize> Archive for Isometry<T, R, D>
    where
        T::Archived: Scalar,
    {
        type Archived = Isometry<T::Archived, R::Archived, D>;
        type Resolver = (R::Resolver, <Translation<T, D> as Archive>::Resolver);

        fn resolve(
            &self,
            pos: usize,
            resolver: Self::Resolver,
            out: &mut core::mem::MaybeUninit<Self::Archived>,
        ) {
            self.rotation.resolve(
                pos + offset_of!(Self::Archived, rotation),
                resolver.0,
                project_struct!(out: Self::Archived => rotation),
            );
            self.translation.resolve(
                pos + offset_of!(Self::Archived, translation),
                resolver.1,
                project_struct!(out: Self::Archived => translation),
            );
        }
    }

    impl<T: Scalar + Serialize<S>, R: Serialize<S>, S: Fallible + ?Sized, const D: usize>
        Serialize<S> for Isometry<T, R, D>
    where
        T::Archived: Scalar,
    {
        fn serialize(&self, serializer: &mut S) -> Result<Self::Resolver, S::Error> {
            Ok((
                self.rotation.serialize(serializer)?,
                self.translation.serialize(serializer)?,
            ))
        }
    }

    impl<T: Scalar + Archive, R: Archive, _D: Fallible + ?Sized, const D: usize>
        Deserialize<Isometry<T, R, D>, _D> for Isometry<T::Archived, R::Archived, D>
    where
        T::Archived: Scalar + Deserialize<T, _D>,
        R::Archived: Scalar + Deserialize<R, _D>,
    {
        fn deserialize(&self, deserializer: &mut _D) -> Result<Isometry<T, R, D>, _D::Error> {
            Ok(Isometry {
                rotation: self.rotation.deserialize(deserializer)?,
                translation: self.translation.deserialize(deserializer)?,
            })
        }
    }
}

impl<T: Scalar + hash::Hash, R: hash::Hash, const D: usize> hash::Hash for Isometry<T, R, D>
where
    Owned<T, Const<D>>: hash::Hash,
@ -19,6 +19,11 @@ use crate::geometry::{Point3, Projective3};

/// A 3D orthographic projection stored as a homogeneous 4x4 matrix.
#[repr(C)]
#[cfg_attr(feature = "rkyv-serialize", derive(bytecheck::CheckBytes))]
#[cfg_attr(
    feature = "rkyv-serialize-no-std",
    derive(rkyv::Archive, rkyv::Serialize, rkyv::Deserialize)
)]
#[cfg_attr(feature = "cuda", derive(cust_core::DeviceCopy))]
#[derive(Copy, Clone)]
pub struct Orthographic3<T> {
@ -20,6 +20,11 @@ use crate::geometry::{Point3, Projective3};

/// A 3D perspective projection stored as a homogeneous 4x4 matrix.
#[repr(C)]
#[cfg_attr(feature = "rkyv-serialize", derive(bytecheck::CheckBytes))]
#[cfg_attr(
    feature = "rkyv-serialize-no-std",
    derive(rkyv::Archive, rkyv::Serialize, rkyv::Deserialize)
)]
#[cfg_attr(feature = "cuda", derive(cust_core::DeviceCopy))]
#[derive(Copy, Clone)]
pub struct Perspective3<T> {
@ -36,6 +36,11 @@ use std::mem::MaybeUninit;
/// of said transformations for details.
#[repr(C)]
#[derive(Clone)]
#[cfg_attr(
    feature = "rkyv-serialize-no-std",
    derive(rkyv::Archive, rkyv::Serialize, rkyv::Deserialize)
)]
#[cfg_attr(feature = "rkyv-serialize", derive(bytecheck::CheckBytes))]
pub struct OPoint<T: Scalar, D: DimName>
where
    DefaultAllocator: Allocator<T, D>,
@ -23,6 +23,11 @@ use crate::geometry::{Point3, Rotation};
/// that may be used as a rotation.
#[repr(C)]
#[derive(Copy, Clone)]
#[cfg_attr(feature = "rkyv-serialize", derive(bytecheck::CheckBytes))]
#[cfg_attr(
    feature = "rkyv-serialize-no-std",
    derive(rkyv::Archive, rkyv::Serialize, rkyv::Deserialize)
)]
#[cfg_attr(feature = "cuda", derive(cust_core::DeviceCopy))]
pub struct Quaternion<T> {
    /// This quaternion as a 4D vector of coordinates in the `[ x, y, z, w ]` storage order.
@ -97,48 +102,6 @@ where
    }
}

#[cfg(feature = "rkyv-serialize-no-std")]
mod rkyv_impl {
    use super::Quaternion;
    use crate::base::Vector4;
    use rkyv::{offset_of, project_struct, Archive, Deserialize, Fallible, Serialize};

    impl<T: Archive> Archive for Quaternion<T> {
        type Archived = Quaternion<T::Archived>;
        type Resolver = <Vector4<T> as Archive>::Resolver;

        fn resolve(
            &self,
            pos: usize,
            resolver: Self::Resolver,
            out: &mut core::mem::MaybeUninit<Self::Archived>,
        ) {
            self.coords.resolve(
                pos + offset_of!(Self::Archived, coords),
                resolver,
                project_struct!(out: Self::Archived => coords),
            );
        }
    }

    impl<T: Serialize<S>, S: Fallible + ?Sized> Serialize<S> for Quaternion<T> {
        fn serialize(&self, serializer: &mut S) -> Result<Self::Resolver, S::Error> {
            self.coords.serialize(serializer)
        }
    }

    impl<T: Archive, D: Fallible + ?Sized> Deserialize<Quaternion<T>, D> for Quaternion<T::Archived>
    where
        T::Archived: Deserialize<T, D>,
    {
        fn deserialize(&self, deserializer: &mut D) -> Result<Quaternion<T>, D::Error> {
            Ok(Quaternion {
                coords: self.coords.deserialize(deserializer)?,
            })
        }
    }
}

impl<T: SimdRealField> Quaternion<T>
where
    T::Element: SimdRealField,
@ -49,6 +49,11 @@ use crate::geometry::Point;
/// * [Conversion to a matrix <span style="float:right;">`matrix`, `to_homogeneous`…</span>](#conversion-to-a-matrix)
///
#[repr(C)]
#[cfg_attr(feature = "rkyv-serialize", derive(bytecheck::CheckBytes))]
#[cfg_attr(
    feature = "rkyv-serialize-no-std",
    derive(rkyv::Archive, rkyv::Serialize, rkyv::Deserialize)
)]
#[cfg_attr(feature = "cuda", derive(cust_core::DeviceCopy))]
#[derive(Copy, Clone)]
pub struct Rotation<T, const D: usize> {
@ -17,6 +17,11 @@ use crate::geometry::Point;

/// A scale which supports non-uniform scaling.
#[repr(C)]
#[cfg_attr(feature = "rkyv-serialize", derive(bytecheck::CheckBytes))]
#[cfg_attr(
    feature = "rkyv-serialize-no-std",
    derive(rkyv::Archive, rkyv::Serialize, rkyv::Deserialize)
)]
#[cfg_attr(feature = "cuda", derive(cust_core::DeviceCopy))]
#[derive(Copy, Clone)]
pub struct Scale<T, const D: usize> {
@ -84,49 +89,6 @@ where
    }
}

#[cfg(feature = "rkyv-serialize-no-std")]
mod rkyv_impl {
    use super::Scale;
    use crate::base::SVector;
    use rkyv::{offset_of, project_struct, Archive, Deserialize, Fallible, Serialize};

    impl<T: Archive, const D: usize> Archive for Scale<T, D> {
        type Archived = Scale<T::Archived, D>;
        type Resolver = <SVector<T, D> as Archive>::Resolver;

        fn resolve(
            &self,
            pos: usize,
            resolver: Self::Resolver,
            out: &mut core::mem::MaybeUninit<Self::Archived>,
        ) {
            self.vector.resolve(
                pos + offset_of!(Self::Archived, vector),
                resolver,
                project_struct!(out: Self::Archived => vector),
            );
        }
    }

    impl<T: Serialize<S>, S: Fallible + ?Sized, const D: usize> Serialize<S> for Scale<T, D> {
        fn serialize(&self, serializer: &mut S) -> Result<Self::Resolver, S::Error> {
            self.vector.serialize(serializer)
        }
    }

    impl<T: Archive, _D: Fallible + ?Sized, const D: usize> Deserialize<Scale<T, D>, _D>
        for Scale<T::Archived, D>
    where
        T::Archived: Deserialize<T, _D>,
    {
        fn deserialize(&self, deserializer: &mut _D) -> Result<Scale<T, D>, _D::Error> {
            Ok(Scale {
                vector: self.vector.deserialize(deserializer)?,
            })
        }
    }
}

impl<T: Scalar, const D: usize> Scale<T, D> {
    /// Inverts `self`.
    ///
@ -34,6 +34,11 @@ use crate::geometry::{AbstractRotation, Isometry, Point, Translation};
        DefaultAllocator: Allocator<T, Const<D>>,
        Owned<T, Const<D>>: Deserialize<'de>"))
)]
#[cfg_attr(
    feature = "rkyv-serialize-no-std",
    derive(rkyv::Archive, rkyv::Serialize, rkyv::Deserialize)
)]
#[cfg_attr(feature = "rkyv-serialize", derive(bytecheck::CheckBytes))]
pub struct Similarity<T, R, const D: usize> {
    /// The part of this similarity that does not include the scaling factor.
    pub isometry: Isometry<T, R, D>,
@ -17,6 +17,11 @@ use crate::geometry::Point;

/// A translation.
#[repr(C)]
#[cfg_attr(feature = "rkyv-serialize", derive(bytecheck::CheckBytes))]
#[cfg_attr(
    feature = "rkyv-serialize-no-std",
    derive(rkyv::Archive, rkyv::Serialize, rkyv::Deserialize)
)]
#[cfg_attr(feature = "cuda", derive(cust_core::DeviceCopy))]
#[derive(Copy, Clone)]
pub struct Translation<T, const D: usize> {
@ -84,49 +89,6 @@ where
    }
}

#[cfg(feature = "rkyv-serialize-no-std")]
mod rkyv_impl {
    use super::Translation;
    use crate::base::SVector;
    use rkyv::{offset_of, project_struct, Archive, Deserialize, Fallible, Serialize};

    impl<T: Archive, const D: usize> Archive for Translation<T, D> {
        type Archived = Translation<T::Archived, D>;
        type Resolver = <SVector<T, D> as Archive>::Resolver;

        fn resolve(
            &self,
            pos: usize,
            resolver: Self::Resolver,
            out: &mut core::mem::MaybeUninit<Self::Archived>,
        ) {
            self.vector.resolve(
                pos + offset_of!(Self::Archived, vector),
                resolver,
                project_struct!(out: Self::Archived => vector),
            );
        }
    }

    impl<T: Serialize<S>, S: Fallible + ?Sized, const D: usize> Serialize<S> for Translation<T, D> {
        fn serialize(&self, serializer: &mut S) -> Result<Self::Resolver, S::Error> {
            self.vector.serialize(serializer)
        }
    }

    impl<T: Archive, _D: Fallible + ?Sized, const D: usize> Deserialize<Translation<T, D>, _D>
        for Translation<T::Archived, D>
    where
        T::Archived: Deserialize<T, _D>,
    {
        fn deserialize(&self, deserializer: &mut _D) -> Result<Translation<T, D>, _D::Error> {
            Ok(Translation {
                vector: self.vector.deserialize(deserializer)?,
            })
        }
    }
}

impl<T: Scalar, const D: usize> Translation<T, D> {
    /// Creates a new translation from the given vector.
    #[inline]
@ -410,7 +410,8 @@ where
    #[inline]
    #[must_use]
    pub fn slerp(&self, other: &Self, t: T) -> Self {
        Self::new(self.angle() * (T::one() - t.clone()) + other.angle() * t)
        let delta = other / self;
        self * Self::new(delta.angle() * t)
    }
}
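
The rewritten UnitComplex::slerp interpolates the relative rotation other / self instead of lerping the two raw angles, so the result follows the shortest arc even when the angles straddle the ±π cut. A small illustration of the difference (not part of the commit):

    use nalgebra::UnitComplex;

    fn main() {
        // Two orientations just on either side of the ±π discontinuity.
        let a = UnitComplex::new(3.0_f64);
        let b = UnitComplex::new(-3.0_f64);
        // Averaging the raw angles would yield 0.0; interpolating the relative
        // rotation keeps the midpoint near ±π, i.e. on the short arc.
        let mid = a.slerp(&b, 0.5);
        assert!((mid.angle().abs() - std::f64::consts::PI).abs() < 1e-3);
    }
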
@ -78,12 +78,13 @@ an optimized set of tools for computer graphics and physics. Those features incl
    unused_mut,
    unused_parens,
    unused_qualifications,
    unused_results,
    rust_2018_idioms,
    rust_2018_compatibility,
    future_incompatible,
    missing_copy_implementations
)]
#![cfg_attr(feature = "rkyv-serialize-no-std", warn(unused_results))] // TODO: deny this once bytecheck stops generating warnings.
#![cfg_attr(not(feature = "rkyv-serialize-no-std"), deny(unused_results))]
#![doc(
    html_favicon_url = "https://nalgebra.org/img/favicon.ico",
    html_root_url = "https://docs.rs/nalgebra/0.25.0"
66
src/third_party/glam/common/glam_isometry.rs
vendored
@ -1,5 +1,5 @@
use super::glam::{DMat3, DMat4, DQuat, DVec2, DVec3, Mat3, Mat4, Quat, Vec2, Vec3};
use crate::{Isometry2, Isometry3, Matrix3, Matrix4, Translation3, UnitQuaternion, Vector2};
use crate::{Isometry2, Isometry3, Matrix3, Matrix4, Translation3, UnitQuaternion};
use std::convert::TryFrom;

impl From<Isometry2<f32>> for Mat3 {
@ -36,18 +36,18 @@ impl From<Isometry3<f64>> for (DVec3, DQuat) {
    }
}

impl From<Isometry2<f32>> for (Vec3, Quat) {
impl From<Isometry2<f32>> for (Vec2, f32) {
    fn from(iso: Isometry2<f32>) -> (Vec3, Quat) {
    fn from(iso: Isometry2<f32>) -> (Vec2, f32) {
        let tra = Vec3::new(iso.translation.x, iso.translation.y, 0.0);
        let tra = Vec2::new(iso.translation.x, iso.translation.y);
        let rot = Quat::from_axis_angle(Vec3::Z, iso.rotation.angle());
        let rot = iso.rotation.angle();
        (tra, rot)
    }
}

impl From<Isometry2<f64>> for (DVec3, DQuat) {
impl From<Isometry2<f64>> for (DVec2, f64) {
    fn from(iso: Isometry2<f64>) -> (DVec3, DQuat) {
    fn from(iso: Isometry2<f64>) -> (DVec2, f64) {
        let tra = DVec3::new(iso.translation.x, iso.translation.y, 0.0);
        let tra = DVec2::new(iso.translation.x, iso.translation.y);
        let rot = DQuat::from_axis_angle(DVec3::Z, iso.rotation.angle());
        let rot = iso.rotation.angle();
        (tra, rot)
    }
}
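
With this hunk a 2D isometry now maps to a (Vec2, f32) translation/angle pair instead of a zero-padded (Vec3, Quat). A short round-trip sketch, assuming the matching convert-glam feature is enabled (values are illustrative, not from the diff):

    use glam::Vec2;
    use nalgebra::Isometry2;

    fn main() {
        let iso = Isometry2::new(Vec2::new(1.0, 2.0).into(), 0.5);
        // New tuple representation: planar translation plus a rotation angle.
        let (tra, angle): (Vec2, f32) = iso.into();
        let back = Isometry2::from((tra, angle));
        assert!((back.rotation.angle() - 0.5).abs() < 1e-6);
    }
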
@ -64,30 +64,6 @@ impl From<(DVec3, DQuat)> for Isometry3<f64> {
    }
}

impl From<(Vec3, Quat)> for Isometry2<f32> {
    fn from((tra, rot): (Vec3, Quat)) -> Self {
        Isometry2::new([tra.x, tra.y].into(), rot.to_axis_angle().1)
    }
}

impl From<(DVec3, DQuat)> for Isometry2<f64> {
    fn from((tra, rot): (DVec3, DQuat)) -> Self {
        Isometry2::new([tra.x, tra.y].into(), rot.to_axis_angle().1)
    }
}

impl From<(Vec2, Quat)> for Isometry2<f32> {
    fn from((tra, rot): (Vec2, Quat)) -> Self {
        Isometry2::new(tra.into(), rot.to_axis_angle().1)
    }
}

impl From<(DVec2, DQuat)> for Isometry2<f64> {
    fn from((tra, rot): (DVec2, DQuat)) -> Self {
        Isometry2::new(tra.into(), rot.to_axis_angle().1)
    }
}

impl From<(Vec2, f32)> for Isometry2<f32> {
    fn from((tra, rot): (Vec2, f32)) -> Self {
        Isometry2::new(tra.into(), rot)
@ -112,18 +88,6 @@ impl From<DQuat> for Isometry3<f64> {
    }
}

impl From<Quat> for Isometry2<f32> {
    fn from(rot: Quat) -> Self {
        Isometry2::new(Vector2::zeros(), rot.to_axis_angle().1)
    }
}

impl From<DQuat> for Isometry2<f64> {
    fn from(rot: DQuat) -> Self {
        Isometry2::new(Vector2::zeros(), rot.to_axis_angle().1)
    }
}

impl From<Vec3> for Isometry3<f32> {
    fn from(tra: Vec3) -> Self {
        Isometry3::from_parts(tra.into(), UnitQuaternion::identity())
@ -148,18 +112,6 @@ impl From<DVec2> for Isometry2<f64> {
    }
}

impl From<Vec3> for Isometry2<f32> {
    fn from(tra: Vec3) -> Self {
        Isometry2::new([tra.x, tra.y].into(), 0.0)
    }
}

impl From<DVec3> for Isometry2<f64> {
    fn from(tra: DVec3) -> Self {
        Isometry2::new([tra.x, tra.y].into(), 0.0)
    }
}

impl TryFrom<Mat3> for Isometry2<f32> {
    type Error = ();

78
src/third_party/glam/common/glam_matrix.rs
vendored
@ -3,7 +3,11 @@ use super::glam::{
    Mat4, UVec2, UVec3, UVec4, Vec2, Vec3, Vec3A, Vec4,
};
use crate::storage::RawStorage;
use crate::{Matrix, Matrix2, Matrix3, Matrix4, Vector, Vector2, Vector3, Vector4, U2, U3, U4};
use crate::{
    Matrix, Matrix2, Matrix3, Matrix4, Unit, UnitVector2, UnitVector3, UnitVector4, Vector,
    Vector2, Vector3, Vector4, U2, U3, U4,
};
use std::convert::TryFrom;

macro_rules! impl_vec_conversion(
    ($N: ty, $Vec2: ty, $Vec3: ty, $Vec4: ty) => {
@ -66,6 +70,63 @@ impl_vec_conversion!(i32, IVec2, IVec3, IVec4);
impl_vec_conversion!(u32, UVec2, UVec3, UVec4);
impl_vec_conversion!(bool, BVec2, BVec3, BVec4);

const ERR: &'static str = "Normalization failed.";

macro_rules! impl_unit_vec_conversion(
    ($N: ty, $Vec2: ty, $Vec3: ty, $Vec4: ty) => {
        impl TryFrom<$Vec2> for UnitVector2<$N> {
            type Error = &'static str;
            #[inline]
            fn try_from(e: $Vec2) -> Result<Self, Self::Error> {
                Unit::try_new(e.into(), 0.0).ok_or(ERR)
            }
        }

        impl From<UnitVector2<$N>> for $Vec2
        {
            #[inline]
            fn from(e: UnitVector2<$N>) -> $Vec2 {
                e.into_inner().into()
            }
        }

        impl TryFrom<$Vec3> for UnitVector3<$N> {
            type Error = &'static str;
            #[inline]
            fn try_from(e: $Vec3) -> Result<Self, Self::Error> {
                Unit::try_new(e.into(), 0.0).ok_or(ERR)
            }
        }

        impl From<UnitVector3<$N>> for $Vec3
        {
            #[inline]
            fn from(e: UnitVector3<$N>) -> $Vec3 {
                e.into_inner().into()
            }
        }

        impl TryFrom<$Vec4> for UnitVector4<$N> {
            type Error = &'static str;
            #[inline]
            fn try_from(e: $Vec4) -> Result<Self, Self::Error> {
                Unit::try_new(e.into(), 0.0).ok_or(ERR)
            }
        }

        impl From<UnitVector4<$N>> for $Vec4
        {
            #[inline]
            fn from(e: UnitVector4<$N>) -> $Vec4 {
                e.into_inner().into()
            }
        }
    }
);

impl_unit_vec_conversion!(f32, Vec2, Vec3, Vec4);
impl_unit_vec_conversion!(f64, DVec2, DVec3, DVec4);

impl From<Vec3A> for Vector3<f32> {
    #[inline]
    fn from(e: Vec3A) -> Vector3<f32> {
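
The impl_unit_vec_conversion! macro added above makes the glam-to-UnitVector direction fallible: the input is normalized through Unit::try_new, and the conversion reports the ERR message when normalization is impossible. A brief usage sketch, assuming the matching convert-glam feature is enabled (not part of the diff):

    use std::convert::TryFrom;

    use glam::Vec3;
    use nalgebra::UnitVector3;

    fn main() {
        // A non-zero vector is normalized on the way in: (0, 3, 4) -> (0, 0.6, 0.8).
        let n = UnitVector3::<f32>::try_from(Vec3::new(0.0, 3.0, 4.0)).unwrap();
        assert!((n.y - 0.6).abs() < 1e-6);

        // The zero vector cannot be normalized, so the conversion fails.
        assert!(UnitVector3::<f32>::try_from(Vec3::ZERO).is_err());
    }
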
@ -83,6 +144,21 @@ where
    }
}

impl TryFrom<Vec3A> for UnitVector3<f32> {
    type Error = &'static str;
    #[inline]
    fn try_from(e: Vec3A) -> Result<Self, Self::Error> {
        Unit::try_new(e.into(), 0.0).ok_or(ERR)
    }
}

impl From<UnitVector3<f32>> for Vec3A {
    #[inline]
    fn from(e: UnitVector3<f32>) -> Vec3A {
        e.into_inner().into()
    }
}

impl From<Mat2> for Matrix2<f32> {
    #[inline]
    fn from(e: Mat2) -> Matrix2<f32> {
2
src/third_party/glam/mod.rs
vendored
@ -1,5 +1,3 @@
#[cfg(feature = "glam013")]
mod v013;
#[cfg(feature = "glam014")]
mod v014;
#[cfg(feature = "glam015")]
18
src/third_party/glam/v013/mod.rs
vendored
@ -1,18 +0,0 @@
#[path = "../common/glam_isometry.rs"]
mod glam_isometry;
#[path = "../common/glam_matrix.rs"]
mod glam_matrix;
#[path = "../common/glam_point.rs"]
mod glam_point;
#[path = "../common/glam_quaternion.rs"]
mod glam_quaternion;
#[path = "../common/glam_rotation.rs"]
mod glam_rotation;
#[path = "../common/glam_similarity.rs"]
mod glam_similarity;
#[path = "../common/glam_translation.rs"]
mod glam_translation;
#[path = "../common/glam_unit_complex.rs"]
mod glam_unit_complex;

pub(self) use glam013 as glam;
@ -32,7 +32,9 @@ fn quaternion_euler_angles_issue_494() {

#[cfg(feature = "proptest-support")]
mod proptest_tests {
    use approx::AbsDiffEq;
    use na::{self, Rotation2, Rotation3, Unit};
    use na::{UnitComplex, UnitQuaternion};
    use simba::scalar::RealField;
    use std::f64;

|
|||||||
prop_assert_eq!(r, Rotation3::identity())
|
prop_assert_eq!(r, Rotation3::identity())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
//
|
||||||
|
//In general, `slerp(a,b,t)` should equal `(b/a)^t * a` even though in practice,
|
||||||
|
//we may not use that formula directly for complex numbers or quaternions
|
||||||
|
//
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn slerp_powf_agree_2(a in unit_complex(), b in unit_complex(), t in PROPTEST_F64) {
|
||||||
|
let z1 = a.slerp(&b, t);
|
||||||
|
let z2 = (b/a).powf(t) * a;
|
||||||
|
prop_assert!(relative_eq!(z1,z2,epsilon=1e-10));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn slerp_powf_agree_3(a in unit_quaternion(), b in unit_quaternion(), t in PROPTEST_F64) {
|
||||||
|
if let Some(z1) = a.try_slerp(&b, t, f64::default_epsilon()) {
|
||||||
|
let z2 = (b/a).powf(t) * a;
|
||||||
|
prop_assert!(relative_eq!(z1,z2,epsilon=1e-10));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
//
|
||||||
|
//when not antipodal, slerp should always take the shortest path between two orientations
|
||||||
|
//
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn slerp_takes_shortest_path_2(
|
||||||
|
z in unit_complex(), dtheta in -f64::pi()..f64::pi(), t in 0.0..1.0f64
|
||||||
|
) {
|
||||||
|
|
||||||
|
//ambiguous when at ends of angle range, so we don't really care here
|
||||||
|
if dtheta.abs() != f64::pi() {
|
||||||
|
|
||||||
|
//make two complex numbers separated by an angle between -pi and pi
|
||||||
|
let (z1, z2) = (z, z * UnitComplex::new(dtheta));
|
||||||
|
let z3 = z1.slerp(&z2, t);
|
||||||
|
|
||||||
|
//since the angle is no larger than a half-turn, and t is between 0 and 1,
|
||||||
|
//the shortest path just corresponds to adding the scaled angle
|
||||||
|
let a1 = z3.angle();
|
||||||
|
let a2 = na::wrap(z1.angle() + dtheta*t, -f64::pi(), f64::pi());
|
||||||
|
|
||||||
|
prop_assert!(relative_eq!(a1, a2, epsilon=1e-10));
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn slerp_takes_shortest_path_3(
|
||||||
|
q in unit_quaternion(), dtheta in -f64::pi()..f64::pi(), t in 0.0..1.0f64
|
||||||
|
) {
|
||||||
|
|
||||||
|
//ambiguous when at ends of angle range, so we don't really care here
|
||||||
|
if let Some(axis) = q.axis() {
|
||||||
|
|
||||||
|
//make two quaternions separated by an angle between -pi and pi
|
||||||
|
let (q1, q2) = (q, q * UnitQuaternion::from_axis_angle(&axis, dtheta));
|
||||||
|
let q3 = q1.slerp(&q2, t);
|
||||||
|
|
||||||
|
//since the angle is no larger than a half-turn, and t is between 0 and 1,
|
||||||
|
//the shortest path just corresponds to adding the scaled angle
|
||||||
|
let q4 = q1 * UnitQuaternion::from_axis_angle(&axis, dtheta*t);
|
||||||
|
prop_assert!(relative_eq!(q3, q4, epsilon=1e-10));
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||