Merge remote-tracking branch 'origin/dev' into dev

This commit is contained in:
Sébastien Crozet 2021-11-23 11:02:51 +01:00
commit ad3eefe182
10 changed files with 426 additions and 27 deletions

View File

@@ -106,3 +106,7 @@ jobs:
run: xargo build --verbose --no-default-features --features alloc --target=x86_64-unknown-linux-gnu;
- name: build thumbv7em-none-eabihf
run: xargo build --verbose --no-default-features --target=thumbv7em-none-eabihf;
- name: build x86_64-unknown-linux-gnu nalgebra-glm
run: xargo build --verbose --no-default-features -p nalgebra-glm --target=x86_64-unknown-linux-gnu;
- name: build thumbv7em-none-eabihf nalgebra-glm
run: xargo build --verbose --no-default-features -p nalgebra-glm --target=thumbv7em-none-eabihf;

View File

@@ -1,9 +1,9 @@
use approx::AbsDiffEq;
use num::{Bounded, Signed};
use core::cmp::PartialOrd;
use na::Scalar;
use simba::scalar::{ClosedAdd, ClosedMul, ClosedSub, RealField};
use std::cmp::PartialOrd;
/// A number that can either be an integer or a float.
pub trait Number:

View File

@@ -73,6 +73,9 @@
html_root_url = "https://nalgebra.org/rustdoc"
)]
extern crate lapack;
extern crate lapack_src;
extern crate nalgebra as na;
extern crate num_traits as num;

View File

@@ -6,9 +6,6 @@ compile_error!("Tests must be run with `proptest-support`");
extern crate nalgebra as na;
extern crate nalgebra_lapack as nl;
extern crate lapack;
extern crate lapack_src;
mod linalg;
#[path = "../../tests/proptest/mod.rs"]
mod proptest;

View File

@@ -23,7 +23,7 @@ use crate::base::{
use crate::base::{DVector, RowDVector, VecStorage};
use crate::base::{SliceStorage, SliceStorageMut};
use crate::constraint::DimEq;
use crate::{IsNotStaticOne, RowSVector, SMatrix, SVector};
use crate::{IsNotStaticOne, RowSVector, SMatrix, SVector, VectorSlice, VectorSliceMut};
use std::mem::MaybeUninit;
// TODO: too bad this won't work for slice conversions.
@@ -125,6 +125,24 @@ impl<T: Scalar, const D: usize> From<SVector<T, D>> for [T; D] {
}
}
impl<'a, T: Scalar, RStride: Dim, CStride: Dim, const D: usize>
From<VectorSlice<'a, T, Const<D>, RStride, CStride>> for [T; D]
{
#[inline]
fn from(vec: VectorSlice<'a, T, Const<D>, RStride, CStride>) -> Self {
vec.into_owned().into()
}
}
impl<'a, T: Scalar, RStride: Dim, CStride: Dim, const D: usize>
From<VectorSliceMut<'a, T, Const<D>, RStride, CStride>> for [T; D]
{
#[inline]
fn from(vec: VectorSliceMut<'a, T, Const<D>, RStride, CStride>) -> Self {
vec.into_owned().into()
}
}
impl<T: Scalar, const D: usize> From<[T; D]> for RowSVector<T, D>
where
Const<D>: IsNotStaticOne,
@@ -197,8 +215,26 @@ impl<T: Scalar, const R: usize, const C: usize> From<[[T; R]; C]> for SMatrix<T,
impl<T: Scalar, const R: usize, const C: usize> From<SMatrix<T, R, C>> for [[T; R]; C] {
#[inline]
fn from(vec: SMatrix<T, R, C>) -> Self {
vec.data.0
fn from(mat: SMatrix<T, R, C>) -> Self {
mat.data.0
}
}
impl<'a, T: Scalar, RStride: Dim, CStride: Dim, const R: usize, const C: usize>
From<MatrixSlice<'a, T, Const<R>, Const<C>, RStride, CStride>> for [[T; R]; C]
{
#[inline]
fn from(mat: MatrixSlice<'a, T, Const<R>, Const<C>, RStride, CStride>) -> Self {
mat.into_owned().into()
}
}
impl<'a, T: Scalar, RStride: Dim, CStride: Dim, const R: usize, const C: usize>
From<MatrixSliceMut<'a, T, Const<R>, Const<C>, RStride, CStride>> for [[T; R]; C]
{
#[inline]
fn from(mat: MatrixSliceMut<'a, T, Const<R>, Const<C>, RStride, CStride>) -> Self {
mat.into_owned().into()
}
}
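For reference, a minimal usage sketch (not part of this patch) of the slice-to-array conversions added above; it assumes the usual `fixed_rows`/`fixed_slice` accessors, which yield the `VectorSlice`/`MatrixSlice` types these impls cover:

use nalgebra::{Matrix3x4, Vector4};

fn main() {
    let v = Vector4::new(1.0, 2.0, 3.0, 4.0);
    // A fixed-size vector slice can now be turned directly into a plain
    // array; the slice is first copied into an owned vector.
    let head: [f64; 2] = v.fixed_rows::<2>(0).into();
    assert_eq!(head, [1.0, 2.0]);

    let m = Matrix3x4::from_fn(|r, c| (r * 4 + c) as f64);
    // Likewise for fixed-size matrix slices, yielding the column-major
    // nested array [[T; R]; C].
    let block: [[f64; 3]; 2] = m.fixed_slice::<3, 2>(0, 0).into();
    assert_eq!(block, [[0.0, 4.0, 8.0], [1.0, 5.0, 9.0]]);
}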

View File

@@ -357,7 +357,8 @@ impl<T: RealField + UlpsEq<Epsilon = T>> UlpsEq for DualQuaternion<T> {
}
}
/// A unit quaternions. May be used to represent a rotation followed by a translation.
/// A unit dual quaternion. May be used to represent a rotation followed by a
/// translation.
pub type UnitDualQuaternion<T> = Unit<DualQuaternion<T>>;
impl<T: Scalar + ClosedNeg + PartialEq + SimdRealField> PartialEq for UnitDualQuaternion<T> {
@@ -593,8 +594,9 @@ where
/// Screw linear interpolation between two unit quaternions. This creates a
/// smooth arc from one dual-quaternion to another.
///
/// Panics if the angle between both quaternion is 180 degrees (in which case the interpolation
/// is not well-defined). Use `.try_sclerp` instead to avoid the panic.
/// Panics if the angle between both quaternions is 180 degrees (in which
/// case the interpolation is not well-defined). Use `.try_sclerp`
/// instead to avoid the panic.
///
/// # Example
/// ```
@@ -627,15 +629,16 @@ where
.expect("DualQuaternion sclerp: ambiguous configuration.")
}
/// Computes the screw-linear interpolation between two unit quaternions or returns `None`
/// if both quaternions are approximately 180 degrees apart (in which case the interpolation is
/// not well-defined).
/// Computes the screw-linear interpolation between two unit quaternions or
/// returns `None` if both quaternions are approximately 180 degrees
/// apart (in which case the interpolation is not well-defined).
///
/// # Arguments
/// * `self`: the first quaternion to interpolate from.
/// * `other`: the second quaternion to interpolate toward.
/// * `t`: the interpolation parameter. Should be between 0 and 1.
/// * `epsilon`: the value below which the sinus of the angle separating both quaternion
/// * `epsilon`: the value below which the sine of the angle separating
/// both quaternions
/// must be to return `None`.
#[inline]
#[must_use]
@@ -650,6 +653,10 @@ where
// interpolation.
let other = {
let dot_product = self.as_ref().real.coords.dot(&other.as_ref().real.coords);
if relative_eq!(dot_product, T::zero(), epsilon = epsilon.clone()) {
return None;
}
if dot_product < T::zero() {
-other.clone()
} else {
@@ -660,13 +667,21 @@ where
let difference = self.as_ref().conjugate() * other.as_ref();
let norm_squared = difference.real.vector().norm_squared();
if relative_eq!(norm_squared, T::zero(), epsilon = epsilon) {
return None;
return Some(Self::from_parts(
self.translation()
.vector
.lerp(&other.translation().vector, t)
.into(),
self.rotation(),
));
}
let inverse_norm_squared = T::one() / norm_squared;
let scalar: T = difference.real.scalar();
let mut angle = two.clone() * scalar.acos();
let inverse_norm_squared: T = T::one() / norm_squared;
let inverse_norm = inverse_norm_squared.sqrt();
let mut angle = two.clone() * difference.real.scalar().acos();
let mut pitch = -two * difference.dual.scalar() * inverse_norm.clone();
let direction = difference.real.vector() * inverse_norm.clone();
let moment = (difference.dual.vector()
@@ -678,6 +693,7 @@ where
let sin = (half.clone() * angle.clone()).sin();
let cos = (half.clone() * angle).cos();
let real = Quaternion::from_parts(cos.clone(), direction.clone() * sin.clone());
let dual = Quaternion::from_parts(
-pitch.clone() * half.clone() * sin.clone(),
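A small sketch (not taken from the repository) of the behavior introduced above: identical orientations now degrade gracefully to a linear interpolation of the translations, while orientations 180 degrees apart still make `try_sclerp` return `None`:

use nalgebra::{Translation3, UnitDualQuaternion, UnitQuaternion, Vector3};
use std::f64::consts::PI;

fn main() {
    let rot = UnitQuaternion::from_axis_angle(&Vector3::z_axis(), PI / 2.0);
    let a = UnitDualQuaternion::from_parts(Translation3::new(0.0, 0.0, 0.0), rot);
    let b = UnitDualQuaternion::from_parts(Translation3::new(2.0, 0.0, 0.0), rot);

    // Same rotation on both sides: instead of hitting the "ambiguous
    // configuration" panic, the screw interpolation now reduces to a lerp
    // of the translation parts.
    let mid = a.sclerp(&b, 0.5);
    assert!((mid.translation().vector.x - 1.0).abs() < 1.0e-7);

    // Rotations 180 degrees apart remain ambiguous; the fallible variant
    // reports this by returning `None`.
    let opposite = UnitDualQuaternion::from_parts(
        Translation3::new(0.0, 0.0, 0.0),
        UnitQuaternion::from_axis_angle(&Vector3::z_axis(), PI) * rot,
    );
    assert!(a.try_sclerp(&opposite, 0.5, 1.0e-7).is_none());
}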

View File

@@ -74,7 +74,31 @@ impl<T: ComplexField, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
}
/// Computes the Singular Value Decomposition using implicit shift.
/// The singular values are guaranteed to be sorted in descending order.
/// If this order is not required consider using `svd_unordered`.
pub fn svd(self, compute_u: bool, compute_v: bool) -> SVD<T, R, C>
where
R: DimMin<C>,
DimMinimum<R, C>: DimSub<U1>, // for Bidiagonal.
DefaultAllocator: Allocator<T, R, C>
+ Allocator<T, C>
+ Allocator<T, R>
+ Allocator<T, DimDiff<DimMinimum<R, C>, U1>>
+ Allocator<T, DimMinimum<R, C>, C>
+ Allocator<T, R, DimMinimum<R, C>>
+ Allocator<T, DimMinimum<R, C>>
+ Allocator<T::RealField, DimMinimum<R, C>>
+ Allocator<T::RealField, DimDiff<DimMinimum<R, C>, U1>>
+ Allocator<(usize, usize), DimMinimum<R, C>>
+ Allocator<(T::RealField, usize), DimMinimum<R, C>>,
{
SVD::new(self.into_owned(), compute_u, compute_v)
}
/// Computes the Singular Value Decomposition using implicit shift.
/// The singular values are not guaranteed to be sorted in any particular order.
/// If a descending order is required, consider using `svd` instead.
pub fn svd_unordered(self, compute_u: bool, compute_v: bool) -> SVD<T, R, C>
where
R: DimMin<C>,
DimMinimum<R, C>: DimSub<U1>, // for Bidiagonal.
@@ -88,10 +112,12 @@ impl<T: ComplexField, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
+ Allocator<T::RealField, DimMinimum<R, C>>
+ Allocator<T::RealField, DimDiff<DimMinimum<R, C>, U1>>,
{
SVD::new(self.into_owned(), compute_u, compute_v)
SVD::new_unordered(self.into_owned(), compute_u, compute_v)
}
/// Attempts to compute the Singular Value Decomposition of `matrix` using implicit shift.
/// The singular values are guaranteed to be sorted in descending order.
/// If this order is not required consider using `try_svd_unordered`.
///
/// # Arguments
///
@@ -119,10 +145,47 @@ impl<T: ComplexField, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
+ Allocator<T, R, DimMinimum<R, C>>
+ Allocator<T, DimMinimum<R, C>>
+ Allocator<T::RealField, DimMinimum<R, C>>
+ Allocator<T::RealField, DimDiff<DimMinimum<R, C>, U1>>,
+ Allocator<T::RealField, DimDiff<DimMinimum<R, C>, U1>>
+ Allocator<(usize, usize), DimMinimum<R, C>>
+ Allocator<(T::RealField, usize), DimMinimum<R, C>>,
{
SVD::try_new(self.into_owned(), compute_u, compute_v, eps, max_niter)
}
/// Attempts to compute the Singular Value Decomposition of `matrix` using implicit shift.
/// The singular values are not guaranteed to be sorted in any particular order.
/// If a descending order is required, consider using `try_svd` instead.
///
/// # Arguments
///
/// * `compute_u` set this to `true` to enable the computation of left-singular vectors.
/// * `compute_v` set this to `true` to enable the computation of right-singular vectors.
/// * `eps` tolerance used to determine when a value converged to 0.
/// * `max_niter` maximum total number of iterations performed by the algorithm. If this
/// number of iterations is exceeded, `None` is returned. If `niter == 0`, then the algorithm
/// continues indefinitely until convergence.
pub fn try_svd_unordered(
self,
compute_u: bool,
compute_v: bool,
eps: T::RealField,
max_niter: usize,
) -> Option<SVD<T, R, C>>
where
R: DimMin<C>,
DimMinimum<R, C>: DimSub<U1>, // for Bidiagonal.
DefaultAllocator: Allocator<T, R, C>
+ Allocator<T, C>
+ Allocator<T, R>
+ Allocator<T, DimDiff<DimMinimum<R, C>, U1>>
+ Allocator<T, DimMinimum<R, C>, C>
+ Allocator<T, R, DimMinimum<R, C>>
+ Allocator<T, DimMinimum<R, C>>
+ Allocator<T::RealField, DimMinimum<R, C>>
+ Allocator<T::RealField, DimDiff<DimMinimum<R, C>, U1>>,
{
SVD::try_new_unordered(self.into_owned(), compute_u, compute_v, eps, max_niter)
}
}
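A brief sketch (not from the diff) contrasting the ordered and unordered entry points added above; the sorted variants carry the extra allocator bounds needed for the permutation bookkeeping:

use nalgebra::Matrix3x2;

fn main() {
    let m = Matrix3x2::new(1.0, 2.0,
                           3.0, 4.0,
                           5.0, 6.0);

    // `svd` sorts the singular values in descending order...
    let ordered = m.svd(true, true);
    assert!(ordered.singular_values[0] >= ordered.singular_values[1]);

    // ...while `svd_unordered` skips the sort.
    let unordered = m.svd_unordered(true, true);
    assert_eq!(ordered.singular_values.len(), unordered.singular_values.len());

    // The fallible variants take a tolerance and an iteration budget;
    // per the docs above, a budget of 0 lets the iteration run to convergence.
    let bounded = m.try_svd_unordered(true, true, 1.0e-12, 0);
    assert!(bounded.is_some());
}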
/// # Square matrix decomposition

View File

@@ -9,6 +9,7 @@ use crate::base::{DefaultAllocator, Matrix, Matrix2x3, OMatrix, OVector, Vector2
use crate::constraint::{SameNumberOfRows, ShapeConstraint};
use crate::dimension::{Dim, DimDiff, DimMin, DimMinimum, DimSub, U1};
use crate::storage::Storage;
use crate::RawStorage;
use simba::scalar::{ComplexField, RealField};
use crate::linalg::givens::GivensRotation;
@@ -79,8 +80,10 @@ where
+ Allocator<T::RealField, DimDiff<DimMinimum<R, C>, U1>>,
{
/// Computes the Singular Value Decomposition of `matrix` using implicit shift.
pub fn new(matrix: OMatrix<T, R, C>, compute_u: bool, compute_v: bool) -> Self {
Self::try_new(
/// The singular values are not guaranteed to be sorted in any particular order.
/// If a descending order is required, consider using `new` instead.
pub fn new_unordered(matrix: OMatrix<T, R, C>, compute_u: bool, compute_v: bool) -> Self {
Self::try_new_unordered(
matrix,
compute_u,
compute_v,
@@ -91,6 +94,8 @@ where
}
/// Attempts to compute the Singular Value Decomposition of `matrix` using implicit shift.
/// The singular values are not guaranteed to be sorted in any particular order.
/// If a descending order is required, consider using `try_new` instead.
///
/// # Arguments
///
@@ -100,7 +105,7 @@ where
/// * `max_niter` maximum total number of iterations performed by the algorithm. If this
/// number of iterations is exceeded, `None` is returned. If `niter == 0`, then the algorithm
/// continues indefinitely until convergence.
pub fn try_new(
pub fn try_new_unordered(
mut matrix: OMatrix<T, R, C>,
compute_u: bool,
compute_v: bool,
@@ -612,6 +617,114 @@ where
}
}
impl<T: ComplexField, R: DimMin<C>, C: Dim> SVD<T, R, C>
where
DimMinimum<R, C>: DimSub<U1>, // for Bidiagonal.
DefaultAllocator: Allocator<T, R, C>
+ Allocator<T, C>
+ Allocator<T, R>
+ Allocator<T, DimDiff<DimMinimum<R, C>, U1>>
+ Allocator<T, DimMinimum<R, C>, C>
+ Allocator<T, R, DimMinimum<R, C>>
+ Allocator<T, DimMinimum<R, C>>
+ Allocator<T::RealField, DimMinimum<R, C>>
+ Allocator<T::RealField, DimDiff<DimMinimum<R, C>, U1>>
+ Allocator<(usize, usize), DimMinimum<R, C>> // for sorted singular values
+ Allocator<(T::RealField, usize), DimMinimum<R, C>>, // for sorted singular values
{
/// Computes the Singular Value Decomposition of `matrix` using implicit shift.
/// The singular values are guaranteed to be sorted in descending order.
/// If this order is not required consider using `new_unordered`.
pub fn new(matrix: OMatrix<T, R, C>, compute_u: bool, compute_v: bool) -> Self {
let mut svd = Self::new_unordered(matrix, compute_u, compute_v);
svd.sort_by_singular_values();
svd
}
/// Attempts to compute the Singular Value Decomposition of `matrix` using implicit shift.
/// The singular values are guaranteed to be sorted in descending order.
/// If this order is not required consider using `try_new_unordered`.
///
/// # Arguments
///
/// * `compute_u` set this to `true` to enable the computation of left-singular vectors.
/// * `compute_v` set this to `true` to enable the computation of right-singular vectors.
/// * `eps` tolerance used to determine when a value converged to 0.
/// * `max_niter` maximum total number of iterations performed by the algorithm. If this
/// number of iterations is exceeded, `None` is returned. If `niter == 0`, then the algorithm
/// continues indefinitely until convergence.
pub fn try_new(
matrix: OMatrix<T, R, C>,
compute_u: bool,
compute_v: bool,
eps: T::RealField,
max_niter: usize,
) -> Option<Self> {
Self::try_new_unordered(matrix, compute_u, compute_v, eps, max_niter).map(|mut svd| {
svd.sort_by_singular_values();
svd
})
}
/// Sort the estimated components of the SVD by its singular values in descending order.
/// Such an ordering is often implicitly required when the decompositions are used for estimation or fitting purposes.
/// Using this function is only required if `new_unordered` or `try_new_unordered` were used and the specific sorting is required afterward.
pub fn sort_by_singular_values(&mut self) {
const VALUE_PROCESSED: usize = usize::MAX;
// Collect the singular values with their original index, ...
let mut singular_values = self.singular_values.map_with_location(|r, _, e| (e, r));
assert_ne!(
singular_values.data.shape().0.value(),
VALUE_PROCESSED,
"Too many singular values"
);
// ... sort the singular values, ...
singular_values
.as_mut_slice()
.sort_unstable_by(|(a, _), (b, _)| b.partial_cmp(a).expect("Singular value was NaN"));
// ... and store them.
self.singular_values
.zip_apply(&singular_values, |value, (new_value, _)| {
value.clone_from(&new_value)
});
// Calculate required permutations given the sorted indices.
// We need to identify all cycles to calculate the required swaps.
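// For example, if the largest value was originally at row 2, the second
// largest at row 0 and the smallest at row 1, the indices form the single
// cycle 0 -> 2 -> 1 -> 0, which is realized by the swaps (0, 2) and (2, 1);
// marking visited entries prevents applying a cycle twice.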
let mut permutations =
crate::PermutationSequence::identity_generic(singular_values.data.shape().0);
for i in 0..singular_values.len() {
let mut index_1 = i;
let mut index_2 = singular_values[i].1;
// Check whether the value was already visited ...
while index_2 != VALUE_PROCESSED // ... or a "double swap" must be avoided.
&& singular_values[index_2].1 != VALUE_PROCESSED
{
// Add the permutation ...
permutations.append_permutation(index_1, index_2);
// ... and mark the value as visited.
singular_values[index_1].1 = VALUE_PROCESSED;
index_1 = index_2;
index_2 = singular_values[index_1].1;
}
}
// Permute the optional components
if let Some(u) = self.u.as_mut() {
permutations.permute_columns(u);
}
if let Some(v_t) = self.v_t.as_mut() {
permutations.permute_rows(v_t);
}
}
}
impl<T: ComplexField, R: DimMin<C>, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S>
where
DimMinimum<R, C>: DimSub<U1>, // for Bidiagonal.
@@ -626,9 +739,11 @@ where
+ Allocator<T::RealField, DimDiff<DimMinimum<R, C>, U1>>,
{
/// Computes the singular values of this matrix.
/// The singular values are not guaranteed to be sorted in any particular order.
/// If a descending order is required, consider using `singular_values` instead.
#[must_use]
pub fn singular_values(&self) -> OVector<T::RealField, DimMinimum<R, C>> {
SVD::new(self.clone_owned(), false, false).singular_values
pub fn singular_values_unordered(&self) -> OVector<T::RealField, DimMinimum<R, C>> {
SVD::new_unordered(self.clone_owned(), false, false).singular_values
}
/// Computes the rank of this matrix.
@@ -636,7 +751,7 @@ where
/// All singular values below `eps` are considered equal to 0.
#[must_use]
pub fn rank(&self, eps: T::RealField) -> usize {
let svd = SVD::new(self.clone_owned(), false, false);
let svd = SVD::new_unordered(self.clone_owned(), false, false);
svd.rank(eps)
}
@@ -647,7 +762,31 @@ where
where
DefaultAllocator: Allocator<T, C, R>,
{
SVD::new(self.clone_owned(), true, true).pseudo_inverse(eps)
SVD::new_unordered(self.clone_owned(), true, true).pseudo_inverse(eps)
}
}
impl<T: ComplexField, R: DimMin<C>, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S>
where
DimMinimum<R, C>: DimSub<U1>,
DefaultAllocator: Allocator<T, R, C>
+ Allocator<T, C>
+ Allocator<T, R>
+ Allocator<T, DimDiff<DimMinimum<R, C>, U1>>
+ Allocator<T, DimMinimum<R, C>, C>
+ Allocator<T, R, DimMinimum<R, C>>
+ Allocator<T, DimMinimum<R, C>>
+ Allocator<T::RealField, DimMinimum<R, C>>
+ Allocator<T::RealField, DimDiff<DimMinimum<R, C>, U1>>
+ Allocator<(usize, usize), DimMinimum<R, C>>
+ Allocator<(T::RealField, usize), DimMinimum<R, C>>,
{
/// Computes the singular values of this matrix.
/// The singular values are guaranteed to be sorted in descending order.
/// If this order is not required consider using `singular_values_unordered`.
#[must_use]
pub fn singular_values(&self) -> OVector<T::RealField, DimMinimum<R, C>> {
SVD::new(self.clone_owned(), false, false).singular_values
}
}
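And a companion sketch (also not part of the diff) for the decomposition-level API above: build the SVD without the implicit sort, then sort on demand; `sort_by_singular_values` permutes the columns of `u` and the rows of `v_t` along with the singular values:

use nalgebra::{Matrix2x3, SVD};

fn main() {
    let m = Matrix2x3::new(3.0, 1.0, 2.0,
                           0.0, 4.0, 1.0);

    // Decompose without sorting, then sort in place only when needed.
    let mut svd = SVD::new_unordered(m, true, true);
    svd.sort_by_singular_values();
    assert!(svd.singular_values[0] >= svd.singular_values[1]);

    // The matrix-level helpers mirror the same split: `singular_values`
    // sorts, `singular_values_unordered` does not.
    let sorted = m.singular_values();
    let unsorted = m.singular_values_unordered();
    assert_eq!(sorted.len(), unsorted.len());
}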

View File

@@ -1,7 +1,7 @@
#![cfg(feature = "proptest-support")]
#![allow(non_snake_case)]
use na::{DualQuaternion, Point3, UnitDualQuaternion, Vector3};
use na::{DualQuaternion, Point3, Unit, UnitDualQuaternion, UnitQuaternion, Vector3};
use crate::proptest::*;
use proptest::{prop_assert, proptest};
@@ -74,6 +74,98 @@ proptest!(
prop_assert!(relative_eq!((dq * t) * p, dq * (t * p), epsilon = 1.0e-7));
}
#[cfg_attr(rustfmt, rustfmt_skip)]
#[test]
fn sclerp_is_defined_for_identical_orientations(
dq in unit_dual_quaternion(),
s in -1.0f64..2.0f64,
t in translation3(),
) {
// Should not panic.
prop_assert!(relative_eq!(dq.sclerp(&dq, 0.0), dq, epsilon = 1.0e-7));
prop_assert!(relative_eq!(dq.sclerp(&dq, 0.5), dq, epsilon = 1.0e-7));
prop_assert!(relative_eq!(dq.sclerp(&dq, 1.0), dq, epsilon = 1.0e-7));
prop_assert!(relative_eq!(dq.sclerp(&dq, s), dq, epsilon = 1.0e-7));
let unit = UnitDualQuaternion::identity();
prop_assert!(relative_eq!(unit.sclerp(&unit, 0.0), unit, epsilon = 1.0e-7));
prop_assert!(relative_eq!(unit.sclerp(&unit, 0.5), unit, epsilon = 1.0e-7));
prop_assert!(relative_eq!(unit.sclerp(&unit, 1.0), unit, epsilon = 1.0e-7));
prop_assert!(relative_eq!(unit.sclerp(&unit, s), unit, epsilon = 1.0e-7));
// ScLERPing two unit dual quaternions with nearly equal rotation
// components should result in a unit dual quaternion with a rotation
// component nearly equal to either input.
let dq2 = t * dq;
prop_assert!(relative_eq!(dq.sclerp(&dq2, 0.0).real, dq.real, epsilon = 1.0e-7));
prop_assert!(relative_eq!(dq.sclerp(&dq2, 0.5).real, dq.real, epsilon = 1.0e-7));
prop_assert!(relative_eq!(dq.sclerp(&dq2, 1.0).real, dq.real, epsilon = 1.0e-7));
prop_assert!(relative_eq!(dq.sclerp(&dq2, s).real, dq.real, epsilon = 1.0e-7));
// ScLERPing two unit dual quaternions with nearly equal rotation
// components should result in a unit dual quaternion with a translation
// component which is nearly equal to linearly interpolating the
// translation components of the inputs.
prop_assert!(relative_eq!(
dq.sclerp(&dq2, s).translation().vector,
dq.translation().vector.lerp(&dq2.translation().vector, s),
epsilon = 1.0e-7
));
let unit2 = t * unit;
prop_assert!(relative_eq!(unit.sclerp(&unit2, 0.0).real, unit.real, epsilon = 1.0e-7));
prop_assert!(relative_eq!(unit.sclerp(&unit2, 0.5).real, unit.real, epsilon = 1.0e-7));
prop_assert!(relative_eq!(unit.sclerp(&unit2, 1.0).real, unit.real, epsilon = 1.0e-7));
prop_assert!(relative_eq!(unit.sclerp(&unit2, s).real, unit.real, epsilon = 1.0e-7));
prop_assert!(relative_eq!(
unit.sclerp(&unit2, s).translation().vector,
unit.translation().vector.lerp(&unit2.translation().vector, s),
epsilon = 1.0e-7
));
}
#[cfg_attr(rustfmt, rustfmt_skip)]
#[test]
fn sclerp_is_not_defined_for_opposite_orientations(
dq in unit_dual_quaternion(),
s in 0.1f64..0.9f64,
t in translation3(),
t2 in translation3(),
v in vector3(),
) {
let iso = dq.to_isometry();
let rot = iso.rotation;
if let Some((axis, angle)) = rot.axis_angle() {
let flipped = UnitQuaternion::from_axis_angle(&axis, angle + std::f64::consts::PI);
let dqf = flipped * rot.inverse() * dq.clone();
prop_assert!(dq.try_sclerp(&dqf, 0.5, 1.0e-7).is_none());
prop_assert!(dq.try_sclerp(&dqf, s, 1.0e-7).is_none());
}
let dq2 = t * dq;
let iso2 = dq2.to_isometry();
let rot2 = iso2.rotation;
if let Some((axis, angle)) = rot2.axis_angle() {
let flipped = UnitQuaternion::from_axis_angle(&axis, angle + std::f64::consts::PI);
let dq3f = t2 * flipped * rot.inverse() * dq.clone();
prop_assert!(dq2.try_sclerp(&dq3f, 0.5, 1.0e-7).is_none());
prop_assert!(dq2.try_sclerp(&dq3f, s, 1.0e-7).is_none());
}
if let Some(axis) = Unit::try_new(v, 1.0e-7) {
let unit = UnitDualQuaternion::identity();
let flip = UnitQuaternion::from_axis_angle(&axis, std::f64::consts::PI);
let unitf = flip * unit;
prop_assert!(unit.try_sclerp(&unitf, 0.5, 1.0e-7).is_none());
prop_assert!(unit.try_sclerp(&unitf, s, 1.0e-7).is_none());
let unit2f = t * unit * flip;
prop_assert!(unit.try_sclerp(&unit2f, 0.5, 1.0e-7).is_none());
prop_assert!(unit.try_sclerp(&unit2f, s, 1.0e-7).is_none());
}
}
#[cfg_attr(rustfmt, rustfmt_skip)]
#[test]
fn all_op_exist(

View File

@@ -326,6 +326,13 @@ fn svd_fail() {
0.07311092531259344, 0.5579247949052946, 0.14518764691585773, 0.03502980663114896, 0.7991329455957719, 0.4929930019965745,
0.12293810556077789, 0.6617084679545999, 0.9002240700227326, 0.027153062135304884, 0.3630189466989524, 0.18207502727558866,
0.843196731466686, 0.08951878746549924, 0.7533450877576973, 0.009558876499740077, 0.9429679490873482, 0.9355764454129878);
// Check unordered ...
let svd = m.clone().svd_unordered(true, true);
let recomp = svd.recompose().unwrap();
assert_relative_eq!(m, recomp, epsilon = 1.0e-5);
// ... and ordered SVD.
let svd = m.clone().svd(true, true);
let recomp = svd.recompose().unwrap();
assert_relative_eq!(m, recomp, epsilon = 1.0e-5);
@@ -344,3 +351,45 @@ fn svd_err() {
svd.clone().pseudo_inverse(-1.0)
);
}
#[test]
#[rustfmt::skip]
fn svd_sorted() {
let reference = nalgebra::matrix![
1.0, 2.0, 3.0, 4.0;
5.0, 6.0, 7.0, 8.0;
9.0, 10.0, 11.0, 12.0
];
let mut svd = nalgebra::SVD {
singular_values: nalgebra::matrix![1.72261225; 2.54368356e+01; 5.14037515e-16],
u: Some(nalgebra::matrix![
-0.88915331, -0.20673589, 0.40824829;
-0.25438183, -0.51828874, -0.81649658;
0.38038964, -0.82984158, 0.40824829
]),
v_t: Some(nalgebra::matrix![
0.73286619, 0.28984978, -0.15316664, -0.59618305;
-0.40361757, -0.46474413, -0.52587069, -0.58699725;
0.44527162, -0.83143156, 0.32704826, 0.05911168
]),
};
assert_relative_eq!(
svd.recompose().expect("valid SVD"),
reference,
epsilon = 1.0e-5
);
svd.sort_by_singular_values();
// Ensure successful sorting
assert_relative_eq!(svd.singular_values.x, 2.54368356e+01, epsilon = 1.0e-5);
// Ensure that the sorted components represent the same decomposition
assert_relative_eq!(
svd.recompose().expect("valid SVD"),
reference,
epsilon = 1.0e-5
);
}