Use Complex instead of Real whenever possible on the base/ module.

This commit is contained in:
sebcrozet 2019-02-23 11:24:07 +01:00
parent 9d08fdcc21
commit 7c91f2eeb5
8 changed files with 228 additions and 119 deletions

View File

@ -1,4 +1,4 @@
use alga::general::{ClosedAdd, ClosedMul}; use alga::general::{ClosedAdd, ClosedMul, Complex};
#[cfg(feature = "std")] #[cfg(feature = "std")]
use matrixmultiply; use matrixmultiply;
use num::{One, Signed, Zero}; use num::{One, Signed, Zero};
@ -190,6 +190,111 @@ impl<N: Scalar + PartialOrd + Signed, R: Dim, C: Dim, S: Storage<N, R, C>> Matri
} }
} }
impl<N: Complex, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
    /// The dot product between two complex or real vectors or matrices (seen as vectors).
    ///
    /// This is the same as `.dot` except that the conjugate of each component of `self` is taken
    /// before performing the products.
    ///
    /// # Panics
    /// Panics if `self` and `rhs` do not have the same number of rows and columns.
    #[inline]
    pub fn cdot<R2: Dim, C2: Dim, SB>(&self, rhs: &Matrix<N, R2, C2, SB>) -> N
    where
        SB: Storage<N, R2, C2>,
        ShapeConstraint: DimEq<R, R2> + DimEq<C, C2>,
    {
        // Both the row count AND the column count must be checked at runtime: the
        // generic loop below reads `rhs` with `get_unchecked` using `self`'s
        // dimensions, so a column-count mismatch (possible with dynamic dims even
        // under the compile-time `DimEq` constraint) would be an out-of-bounds read
        // (undefined behavior), and the fixed-size fast paths would silently ignore
        // any extra columns.
        assert!(
            self.nrows() == rhs.nrows() && self.ncols() == rhs.ncols(),
            "Dot product dimensions mismatch."
        );

        // Special cases for common fixed-size vectors of dimension lower than 8
        // because the unrolled loop below won't be very efficient on those.
        //
        // SAFETY: the static-dimension tests combined with the shape assertion above
        // guarantee that every index accessed below is in bounds for both matrices.
        if (R::is::<U2>() || R2::is::<U2>()) && (C::is::<U1>() || C2::is::<U1>()) {
            unsafe {
                let a = self.get_unchecked((0, 0)).conjugate() * *rhs.get_unchecked((0, 0));
                let b = self.get_unchecked((1, 0)).conjugate() * *rhs.get_unchecked((1, 0));

                return a + b;
            }
        }
        if (R::is::<U3>() || R2::is::<U3>()) && (C::is::<U1>() || C2::is::<U1>()) {
            unsafe {
                let a = self.get_unchecked((0, 0)).conjugate() * *rhs.get_unchecked((0, 0));
                let b = self.get_unchecked((1, 0)).conjugate() * *rhs.get_unchecked((1, 0));
                let c = self.get_unchecked((2, 0)).conjugate() * *rhs.get_unchecked((2, 0));

                return a + b + c;
            }
        }
        if (R::is::<U4>() || R2::is::<U4>()) && (C::is::<U1>() || C2::is::<U1>()) {
            unsafe {
                let mut a = self.get_unchecked((0, 0)).conjugate() * *rhs.get_unchecked((0, 0));
                let mut b = self.get_unchecked((1, 0)).conjugate() * *rhs.get_unchecked((1, 0));
                let c = self.get_unchecked((2, 0)).conjugate() * *rhs.get_unchecked((2, 0));
                let d = self.get_unchecked((3, 0)).conjugate() * *rhs.get_unchecked((3, 0));

                // Pairwise accumulation keeps two independent dependency chains.
                a += c;
                b += d;

                return a + b;
            }
        }

        // All this is inspired from the "unrolled version" discussed in:
        // http://blog.theincredibleholk.org/blog/2012/12/10/optimizing-dot-product/
        //
        // And this comment from bluss:
        // https://users.rust-lang.org/t/how-to-zip-two-slices-efficiently/2048/12
        let mut res = N::zero();

        // We have to define them outside of the loop (and not inside at first assignment)
        // otherwise vectorization won't kick in for some reason.
        let mut acc0;
        let mut acc1;
        let mut acc2;
        let mut acc3;
        let mut acc4;
        let mut acc5;
        let mut acc6;
        let mut acc7;

        for j in 0..self.ncols() {
            let mut i = 0;

            acc0 = N::zero();
            acc1 = N::zero();
            acc2 = N::zero();
            acc3 = N::zero();
            acc4 = N::zero();
            acc5 = N::zero();
            acc6 = N::zero();
            acc7 = N::zero();

            // 8-way unrolled main loop: eight independent accumulators so the
            // products can be computed in parallel by SIMD units.
            // SAFETY: `i + 7 < self.nrows()` by the loop condition, and `rhs` has
            // the same shape as `self` thanks to the assertion above.
            while self.nrows() - i >= 8 {
                acc0 += unsafe { self.get_unchecked((i + 0, j)).conjugate() * *rhs.get_unchecked((i + 0, j)) };
                acc1 += unsafe { self.get_unchecked((i + 1, j)).conjugate() * *rhs.get_unchecked((i + 1, j)) };
                acc2 += unsafe { self.get_unchecked((i + 2, j)).conjugate() * *rhs.get_unchecked((i + 2, j)) };
                acc3 += unsafe { self.get_unchecked((i + 3, j)).conjugate() * *rhs.get_unchecked((i + 3, j)) };
                acc4 += unsafe { self.get_unchecked((i + 4, j)).conjugate() * *rhs.get_unchecked((i + 4, j)) };
                acc5 += unsafe { self.get_unchecked((i + 5, j)).conjugate() * *rhs.get_unchecked((i + 5, j)) };
                acc6 += unsafe { self.get_unchecked((i + 6, j)).conjugate() * *rhs.get_unchecked((i + 6, j)) };
                acc7 += unsafe { self.get_unchecked((i + 7, j)).conjugate() * *rhs.get_unchecked((i + 7, j)) };
                i += 8;
            }

            res += acc0 + acc4;
            res += acc1 + acc5;
            res += acc2 + acc6;
            res += acc3 + acc7;

            // Scalar tail for the remaining (< 8) rows of this column.
            // SAFETY: `k < self.nrows()` and shapes match per the assertion above.
            for k in i..self.nrows() {
                res += unsafe { self.get_unchecked((k, j)).conjugate() * *rhs.get_unchecked((k, j)) }
            }
        }

        res
    }
}
impl<N, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> impl<N, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S>
where N: Scalar + Zero + ClosedAdd + ClosedMul where N: Scalar + Zero + ClosedAdd + ClosedMul
{ {

View File

@ -1,5 +1,4 @@
use num::{One, Zero}; use num::{One, Zero};
use num_complex::Complex;
#[cfg(feature = "abomonation-serialize")] #[cfg(feature = "abomonation-serialize")]
use std::io::{Result as IOResult, Write}; use std::io::{Result as IOResult, Write};
@ -17,7 +16,7 @@ use serde::{Deserialize, Deserializer, Serialize, Serializer};
#[cfg(feature = "abomonation-serialize")] #[cfg(feature = "abomonation-serialize")]
use abomonation::Abomonation; use abomonation::Abomonation;
use alga::general::{ClosedAdd, ClosedMul, ClosedSub, Real, Ring}; use alga::general::{ClosedAdd, ClosedMul, ClosedSub, Real, Ring, Complex, Field};
use base::allocator::{Allocator, SameShapeAllocator, SameShapeC, SameShapeR}; use base::allocator::{Allocator, SameShapeAllocator, SameShapeC, SameShapeR};
use base::constraint::{DimEq, SameNumberOfColumns, SameNumberOfRows, ShapeConstraint}; use base::constraint::{DimEq, SameNumberOfColumns, SameNumberOfRows, ShapeConstraint};
@ -906,14 +905,14 @@ impl<N: Scalar, D: Dim, S: StorageMut<N, D, D>> Matrix<N, D, D, S> {
} }
} }
impl<N: Real, R: Dim, C: Dim, S: Storage<Complex<N>, R, C>> Matrix<Complex<N>, R, C, S> { impl<N: Complex, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Takes the conjugate and transposes `self` and store the result into `out`. /// Takes the conjugate and transposes `self` and store the result into `out`.
#[inline] #[inline]
pub fn conjugate_transpose_to<R2, C2, SB>(&self, out: &mut Matrix<Complex<N>, R2, C2, SB>) pub fn conjugate_transpose_to<R2, C2, SB>(&self, out: &mut Matrix<N, R2, C2, SB>)
where where
R2: Dim, R2: Dim,
C2: Dim, C2: Dim,
SB: StorageMut<Complex<N>, R2, C2>, SB: StorageMut<N, R2, C2>,
ShapeConstraint: SameNumberOfRows<R, C2> + SameNumberOfColumns<C, R2>, ShapeConstraint: SameNumberOfRows<R, C2> + SameNumberOfColumns<C, R2>,
{ {
let (nrows, ncols) = self.shape(); let (nrows, ncols) = self.shape();
@ -926,7 +925,7 @@ impl<N: Real, R: Dim, C: Dim, S: Storage<Complex<N>, R, C>> Matrix<Complex<N>, R
for i in 0..nrows { for i in 0..nrows {
for j in 0..ncols { for j in 0..ncols {
unsafe { unsafe {
*out.get_unchecked_mut((j, i)) = self.get_unchecked((i, j)).conj(); *out.get_unchecked_mut((j, i)) = self.get_unchecked((i, j)).conjugate();
} }
} }
} }
@ -934,8 +933,8 @@ impl<N: Real, R: Dim, C: Dim, S: Storage<Complex<N>, R, C>> Matrix<Complex<N>, R
/// The conjugate transposition of `self`. /// The conjugate transposition of `self`.
#[inline] #[inline]
pub fn conjugate_transpose(&self) -> MatrixMN<Complex<N>, C, R> pub fn conjugate_transpose(&self) -> MatrixMN<N, C, R>
where DefaultAllocator: Allocator<Complex<N>, C, R> { where DefaultAllocator: Allocator<N, C, R> {
let (nrows, ncols) = self.data.shape(); let (nrows, ncols) = self.data.shape();
unsafe { unsafe {
@ -947,7 +946,7 @@ impl<N: Real, R: Dim, C: Dim, S: Storage<Complex<N>, R, C>> Matrix<Complex<N>, R
} }
} }
impl<N: Real, D: Dim, S: StorageMut<Complex<N>, D, D>> Matrix<Complex<N>, D, D, S> { impl<N: Complex, D: Dim, S: StorageMut<N, D, D>> Matrix<N, D, D, S> {
/// Sets `self` to its conjugate transpose. /// Sets `self` to its conjugate transpose.
pub fn conjugate_transpose_mut(&mut self) { pub fn conjugate_transpose_mut(&mut self) {
assert!( assert!(
@ -960,10 +959,10 @@ impl<N: Real, D: Dim, S: StorageMut<Complex<N>, D, D>> Matrix<Complex<N>, D, D,
for i in 1..dim { for i in 1..dim {
for j in 0..i { for j in 0..i {
unsafe { unsafe {
let ref_ij = self.get_unchecked_mut((i, j)) as *mut Complex<N>; let ref_ij = self.get_unchecked_mut((i, j)) as *mut N;
let ref_ji = self.get_unchecked_mut((j, i)) as *mut Complex<N>; let ref_ji = self.get_unchecked_mut((j, i)) as *mut N;
let conj_ij = (*ref_ij).conj(); let conj_ij = (*ref_ij).conjugate();
let conj_ji = (*ref_ji).conj(); let conj_ji = (*ref_ji).conjugate();
*ref_ij = conj_ji; *ref_ij = conj_ji;
*ref_ji = conj_ij; *ref_ji = conj_ij;
} }
@ -1407,7 +1406,7 @@ impl<N: Scalar + Ring, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
} }
} }
impl<N: Real, S: Storage<N, U3>> Vector<N, U3, S> impl<N: Scalar + Field, S: Storage<N, U3>> Vector<N, U3, S>
where DefaultAllocator: Allocator<N, U3> where DefaultAllocator: Allocator<N, U3>
{ {
/// Computes the matrix `M` such that for all vector `v` we have `M * v == self.cross(&v)`. /// Computes the matrix `M` such that for all vector `v` we have `M * v == self.cross(&v)`.
@ -1427,27 +1426,27 @@ where DefaultAllocator: Allocator<N, U3>
} }
} }
impl<N: Real, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> { impl<N: Complex, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// The smallest angle between two vectors. /// The smallest angle between two vectors.
#[inline] #[inline]
pub fn angle<R2: Dim, C2: Dim, SB>(&self, other: &Matrix<N, R2, C2, SB>) -> N pub fn angle<R2: Dim, C2: Dim, SB>(&self, other: &Matrix<N, R2, C2, SB>) -> N::Real
where where
SB: Storage<N, R2, C2>, SB: Storage<N, R2, C2>,
ShapeConstraint: DimEq<R, R2> + DimEq<C, C2>, ShapeConstraint: DimEq<R, R2> + DimEq<C, C2>,
{ {
let prod = self.dot(other); let prod = self.cdot(other);
let n1 = self.norm(); let n1 = self.norm();
let n2 = other.norm(); let n2 = other.norm();
if n1.is_zero() || n2.is_zero() { if n1.is_zero() || n2.is_zero() {
N::zero() N::Real::zero()
} else { } else {
let cang = prod / (n1 * n2); let cang = prod.real() / (n1 * n2);
if cang > N::one() { if cang > N::Real::one() {
N::zero() N::Real::zero()
} else if cang < -N::one() { } else if cang < -N::Real::one() {
N::pi() N::Real::pi()
} else { } else {
cang.acos() cang.acos()
} }
@ -1478,18 +1477,18 @@ impl<N: Scalar + Zero + One + ClosedAdd + ClosedSub + ClosedMul, D: Dim, S: Stor
} }
} }
impl<N: Real, D: Dim, S: Storage<N, D>> Unit<Vector<N, D, S>> { impl<N: Complex, D: Dim, S: Storage<N, D>> Unit<Vector<N, D, S>> {
/// Computes the spherical linear interpolation between two unit vectors. /// Computes the spherical linear interpolation between two unit vectors.
pub fn slerp<S2: Storage<N, D>>( pub fn slerp<S2: Storage<N, D>>(
&self, &self,
rhs: &Unit<Vector<N, D, S2>>, rhs: &Unit<Vector<N, D, S2>>,
t: N, t: N::Real,
) -> Unit<VectorN<N, D>> ) -> Unit<VectorN<N, D>>
where where
DefaultAllocator: Allocator<N, D>, DefaultAllocator: Allocator<N, D>,
{ {
// FIXME: the result is wrong when self and rhs are collinear with opposite direction. // FIXME: the result is wrong when self and rhs are collinear with opposite direction.
self.try_slerp(rhs, t, N::default_epsilon()) self.try_slerp(rhs, t, N::Real::default_epsilon())
.unwrap_or(Unit::new_unchecked(self.clone_owned())) .unwrap_or(Unit::new_unchecked(self.clone_owned()))
} }
@ -1500,29 +1499,29 @@ impl<N: Real, D: Dim, S: Storage<N, D>> Unit<Vector<N, D, S>> {
pub fn try_slerp<S2: Storage<N, D>>( pub fn try_slerp<S2: Storage<N, D>>(
&self, &self,
rhs: &Unit<Vector<N, D, S2>>, rhs: &Unit<Vector<N, D, S2>>,
t: N, t: N::Real,
epsilon: N, epsilon: N::Real,
) -> Option<Unit<VectorN<N, D>>> ) -> Option<Unit<VectorN<N, D>>>
where where
DefaultAllocator: Allocator<N, D>, DefaultAllocator: Allocator<N, D>,
{ {
let c_hang = self.dot(rhs); let c_hang = self.cdot(rhs).real();
// self == other // self == other
if c_hang.abs() >= N::one() { if c_hang.abs() >= N::Real::one() {
return Some(Unit::new_unchecked(self.clone_owned())); return Some(Unit::new_unchecked(self.clone_owned()));
} }
let hang = c_hang.acos(); let hang = c_hang.acos();
let s_hang = (N::one() - c_hang * c_hang).sqrt(); let s_hang = (N::Real::one() - c_hang * c_hang).sqrt();
// FIXME: what if s_hang is 0.0 ? The result is not well-defined. // FIXME: what if s_hang is 0.0 ? The result is not well-defined.
if relative_eq!(s_hang, N::zero(), epsilon = epsilon) { if relative_eq!(s_hang, N::Real::zero(), epsilon = epsilon) {
None None
} else { } else {
let ta = ((N::one() - t) * hang).sin() / s_hang; let ta = ((N::Real::one() - t) * hang).sin() / s_hang;
let tb = (t * hang).sin() / s_hang; let tb = (t * hang).sin() / s_hang;
let res = &**self * ta + &**rhs * tb; let res = &**self * N::from_real(ta) + &**rhs * N::from_real(tb);
Some(Unit::new_unchecked(res)) Some(Unit::new_unchecked(res))
} }

View File

@ -7,7 +7,7 @@ use alga::general::{
AbstractGroup, AbstractGroupAbelian, AbstractLoop, AbstractMagma, AbstractModule, AbstractGroup, AbstractGroupAbelian, AbstractLoop, AbstractMagma, AbstractModule,
AbstractMonoid, AbstractQuasigroup, AbstractSemigroup, Additive, ClosedAdd, ClosedMul, AbstractMonoid, AbstractQuasigroup, AbstractSemigroup, Additive, ClosedAdd, ClosedMul,
ClosedNeg, Field, Identity, TwoSidedInverse, JoinSemilattice, Lattice, MeetSemilattice, Module, ClosedNeg, Field, Identity, TwoSidedInverse, JoinSemilattice, Lattice, MeetSemilattice, Module,
Multiplicative, Real, RingCommutative, Multiplicative, Real, RingCommutative, Complex
}; };
use alga::linear::{ use alga::linear::{
FiniteDimInnerSpace, FiniteDimVectorSpace, InnerSpace, NormedSpace, VectorSpace, FiniteDimInnerSpace, FiniteDimVectorSpace, InnerSpace, NormedSpace, VectorSpace,
@ -145,16 +145,19 @@ where
} }
} }
impl<N: Real, R: DimName, C: DimName> NormedSpace for MatrixMN<N, R, C> impl<N: Complex, R: DimName, C: DimName> NormedSpace for MatrixMN<N, R, C>
where DefaultAllocator: Allocator<N, R, C> where DefaultAllocator: Allocator<N, R, C>
{ {
type Real = N::Real;
type Complex = N;
#[inline] #[inline]
fn norm_squared(&self) -> N { fn norm_squared(&self) -> N::Real {
self.norm_squared() self.norm_squared()
} }
#[inline] #[inline]
fn norm(&self) -> N { fn norm(&self) -> N::Real {
self.norm() self.norm()
} }
@ -164,34 +167,32 @@ where DefaultAllocator: Allocator<N, R, C>
} }
#[inline] #[inline]
fn normalize_mut(&mut self) -> N { fn normalize_mut(&mut self) -> N::Real {
self.normalize_mut() self.normalize_mut()
} }
#[inline] #[inline]
fn try_normalize(&self, min_norm: N) -> Option<Self> { fn try_normalize(&self, min_norm: N::Real) -> Option<Self> {
self.try_normalize(min_norm) self.try_normalize(min_norm)
} }
#[inline] #[inline]
fn try_normalize_mut(&mut self, min_norm: N) -> Option<N> { fn try_normalize_mut(&mut self, min_norm: N::Real) -> Option<N::Real> {
self.try_normalize_mut(min_norm) self.try_normalize_mut(min_norm)
} }
} }
impl<N: Real, R: DimName, C: DimName> InnerSpace for MatrixMN<N, R, C> impl<N: Complex, R: DimName, C: DimName> InnerSpace for MatrixMN<N, R, C>
where DefaultAllocator: Allocator<N, R, C> where DefaultAllocator: Allocator<N, R, C>
{ {
type Real = N;
#[inline] #[inline]
fn angle(&self, other: &Self) -> N { fn angle(&self, other: &Self) -> N::Real {
self.angle(other) self.angle(other)
} }
#[inline] #[inline]
fn inner_product(&self, other: &Self) -> N { fn inner_product(&self, other: &Self) -> N {
self.dot(other) self.cdot(other)
} }
} }
@ -199,7 +200,7 @@ where DefaultAllocator: Allocator<N, R, C>
// In particular: // In particular:
// use `x()` instead of `::canonical_basis_element` // use `x()` instead of `::canonical_basis_element`
// use `::new(x, y, z)` instead of `::from_slice` // use `::new(x, y, z)` instead of `::from_slice`
impl<N: Real, R: DimName, C: DimName> FiniteDimInnerSpace for MatrixMN<N, R, C> impl<N: Complex, R: DimName, C: DimName> FiniteDimInnerSpace for MatrixMN<N, R, C>
where DefaultAllocator: Allocator<N, R, C> where DefaultAllocator: Allocator<N, R, C>
{ {
#[inline] #[inline]
@ -215,7 +216,7 @@ where DefaultAllocator: Allocator<N, R, C>
} }
} }
if vs[i].try_normalize_mut(N::zero()).is_some() { if vs[i].try_normalize_mut(N::Real::zero()).is_some() {
// FIXME: this will be efficient on dynamically-allocated vectors but for // FIXME: this will be efficient on dynamically-allocated vectors but for
// statically-allocated ones, `.clone_from` would be better. // statically-allocated ones, `.clone_from` would be better.
vs.swap(nbasis_elements, i); vs.swap(nbasis_elements, i);
@ -268,7 +269,7 @@ where DefaultAllocator: Allocator<N, R, C>
let v = &vs[0]; let v = &vs[0];
let mut a; let mut a;
if v[0].abs() > v[1].abs() { if v[0].modulus() > v[1].modulus() {
a = Self::from_column_slice(&[v[2], N::zero(), -v[0]]); a = Self::from_column_slice(&[v[2], N::zero(), -v[0]]);
} else { } else {
a = Self::from_column_slice(&[N::zero(), -v[2], v[1]]); a = Self::from_column_slice(&[N::zero(), -v[2], v[1]]);
@ -300,7 +301,7 @@ where DefaultAllocator: Allocator<N, R, C>
elt -= v * elt.dot(v) elt -= v * elt.dot(v)
} }
if let Some(subsp_elt) = elt.try_normalize(N::zero()) { if let Some(subsp_elt) = elt.try_normalize(N::Real::zero()) {
if !f(&subsp_elt) { if !f(&subsp_elt) {
return; return;
}; };

View File

@ -1,8 +1,8 @@
use num::Signed; use num::{Signed, Zero};
use std::cmp::PartialOrd; use std::cmp::PartialOrd;
use allocator::Allocator; use allocator::Allocator;
use ::{Real, Scalar}; use ::{Real, Complex, Scalar};
use storage::{Storage, StorageMut}; use storage::{Storage, StorageMut};
use base::{DefaultAllocator, Matrix, Dim, MatrixMN}; use base::{DefaultAllocator, Matrix, Dim, MatrixMN};
use constraint::{SameNumberOfRows, SameNumberOfColumns, ShapeConstraint}; use constraint::{SameNumberOfRows, SameNumberOfColumns, ShapeConstraint};
@ -12,12 +12,12 @@ use constraint::{SameNumberOfRows, SameNumberOfColumns, ShapeConstraint};
/// A trait for abstract matrix norms. /// A trait for abstract matrix norms.
/// ///
/// This may be moved to the alga crate in the future. /// This may be moved to the alga crate in the future.
pub trait Norm<N: Scalar> { pub trait Norm<N: Complex> {
/// Apply this norm to the given matrix. /// Apply this norm to the given matrix.
fn norm<R, C, S>(&self, m: &Matrix<N, R, C, S>) -> N fn norm<R, C, S>(&self, m: &Matrix<N, R, C, S>) -> N::Real
where R: Dim, C: Dim, S: Storage<N, R, C>; where R: Dim, C: Dim, S: Storage<N, R, C>;
/// Use the metric induced by this norm to compute the metric distance between the two given matrices. /// Use the metric induced by this norm to compute the metric distance between the two given matrices.
fn metric_distance<R1, C1, S1, R2, C2, S2>(&self, m1: &Matrix<N, R1, C1, S1>, m2: &Matrix<N, R2, C2, S2>) -> N fn metric_distance<R1, C1, S1, R2, C2, S2>(&self, m1: &Matrix<N, R1, C1, S1>, m2: &Matrix<N, R2, C2, S2>) -> N::Real
where R1: Dim, C1: Dim, S1: Storage<N, R1, C1>, where R1: Dim, C1: Dim, S1: Storage<N, R1, C1>,
R2: Dim, C2: Dim, S2: Storage<N, R2, C2>, R2: Dim, C2: Dim, S2: Storage<N, R2, C2>,
ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2>; ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2>;
@ -30,60 +30,60 @@ pub struct LpNorm(pub i32);
/// L-infinite norm aka. Chebyshev norm aka. uniform norm aka. supremum norm. /// L-infinite norm aka. Chebyshev norm aka. uniform norm aka. supremum norm.
pub struct UniformNorm; pub struct UniformNorm;
impl<N: Real> Norm<N> for EuclideanNorm { impl<N: Complex> Norm<N> for EuclideanNorm {
#[inline] #[inline]
fn norm<R, C, S>(&self, m: &Matrix<N, R, C, S>) -> N fn norm<R, C, S>(&self, m: &Matrix<N, R, C, S>) -> N::Real
where R: Dim, C: Dim, S: Storage<N, R, C> { where R: Dim, C: Dim, S: Storage<N, R, C> {
m.norm_squared().sqrt() m.cdot(m).real().sqrt()
} }
#[inline] #[inline]
fn metric_distance<R1, C1, S1, R2, C2, S2>(&self, m1: &Matrix<N, R1, C1, S1>, m2: &Matrix<N, R2, C2, S2>) -> N fn metric_distance<R1, C1, S1, R2, C2, S2>(&self, m1: &Matrix<N, R1, C1, S1>, m2: &Matrix<N, R2, C2, S2>) -> N::Real
where R1: Dim, C1: Dim, S1: Storage<N, R1, C1>, where R1: Dim, C1: Dim, S1: Storage<N, R1, C1>,
R2: Dim, C2: Dim, S2: Storage<N, R2, C2>, R2: Dim, C2: Dim, S2: Storage<N, R2, C2>,
ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2> { ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2> {
m1.zip_fold(m2, N::zero(), |acc, a, b| { m1.zip_fold(m2, N::Real::zero(), |acc, a, b| {
let diff = a - b; let diff = a - b;
acc + diff * diff acc + (diff.conjugate() * diff).real()
}).sqrt() }).sqrt()
} }
} }
impl<N: Real> Norm<N> for LpNorm { impl<N: Complex> Norm<N> for LpNorm {
#[inline] #[inline]
fn norm<R, C, S>(&self, m: &Matrix<N, R, C, S>) -> N fn norm<R, C, S>(&self, m: &Matrix<N, R, C, S>) -> N::Real
where R: Dim, C: Dim, S: Storage<N, R, C> { where R: Dim, C: Dim, S: Storage<N, R, C> {
m.fold(N::zero(), |a, b| { m.fold(N::Real::zero(), |a, b| {
a + b.abs().powi(self.0) a + b.modulus().powi(self.0)
}).powf(::convert(1.0 / (self.0 as f64))) }).powf(::convert(1.0 / (self.0 as f64)))
} }
#[inline] #[inline]
fn metric_distance<R1, C1, S1, R2, C2, S2>(&self, m1: &Matrix<N, R1, C1, S1>, m2: &Matrix<N, R2, C2, S2>) -> N fn metric_distance<R1, C1, S1, R2, C2, S2>(&self, m1: &Matrix<N, R1, C1, S1>, m2: &Matrix<N, R2, C2, S2>) -> N::Real
where R1: Dim, C1: Dim, S1: Storage<N, R1, C1>, where R1: Dim, C1: Dim, S1: Storage<N, R1, C1>,
R2: Dim, C2: Dim, S2: Storage<N, R2, C2>, R2: Dim, C2: Dim, S2: Storage<N, R2, C2>,
ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2> { ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2> {
m1.zip_fold(m2, N::zero(), |acc, a, b| { m1.zip_fold(m2, N::Real::zero(), |acc, a, b| {
let diff = a - b; let diff = a - b;
acc + diff.abs().powi(self.0) acc + diff.modulus().powi(self.0)
}).powf(::convert(1.0 / (self.0 as f64))) }).powf(::convert(1.0 / (self.0 as f64)))
} }
} }
impl<N: Scalar + PartialOrd + Signed> Norm<N> for UniformNorm { impl<N: Complex> Norm<N> for UniformNorm {
#[inline] #[inline]
fn norm<R, C, S>(&self, m: &Matrix<N, R, C, S>) -> N fn norm<R, C, S>(&self, m: &Matrix<N, R, C, S>) -> N::Real
where R: Dim, C: Dim, S: Storage<N, R, C> { where R: Dim, C: Dim, S: Storage<N, R, C> {
m.amax() m.fold(N::Real::zero(), |acc, a| acc.max(a.modulus()))
} }
#[inline] #[inline]
fn metric_distance<R1, C1, S1, R2, C2, S2>(&self, m1: &Matrix<N, R1, C1, S1>, m2: &Matrix<N, R2, C2, S2>) -> N fn metric_distance<R1, C1, S1, R2, C2, S2>(&self, m1: &Matrix<N, R1, C1, S1>, m2: &Matrix<N, R2, C2, S2>) -> N::Real
where R1: Dim, C1: Dim, S1: Storage<N, R1, C1>, where R1: Dim, C1: Dim, S1: Storage<N, R1, C1>,
R2: Dim, C2: Dim, S2: Storage<N, R2, C2>, R2: Dim, C2: Dim, S2: Storage<N, R2, C2>,
ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2> { ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2> {
m1.zip_fold(m2, N::zero(), |acc, a, b| { m1.zip_fold(m2, N::Real::zero(), |acc, a, b| {
let val = (a - b).abs(); let val = (a - b).modulus();
if val > acc { if val > acc {
val val
} else { } else {
@ -94,15 +94,15 @@ impl<N: Scalar + PartialOrd + Signed> Norm<N> for UniformNorm {
} }
impl<N: Real, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> { impl<N: Complex, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// The squared L2 norm of this vector. /// The squared L2 norm of this vector.
#[inline] #[inline]
pub fn norm_squared(&self) -> N { pub fn norm_squared(&self) -> N::Real {
let mut res = N::zero(); let mut res = N::Real::zero();
for i in 0..self.ncols() { for i in 0..self.ncols() {
let col = self.column(i); let col = self.column(i);
res += col.dot(&col) res += col.cdot(&col).real()
} }
res res
@ -112,7 +112,7 @@ impl<N: Real, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// ///
/// Use `.apply_norm` to apply a custom norm. /// Use `.apply_norm` to apply a custom norm.
#[inline] #[inline]
pub fn norm(&self) -> N { pub fn norm(&self) -> N::Real {
self.norm_squared().sqrt() self.norm_squared().sqrt()
} }
@ -120,7 +120,7 @@ impl<N: Real, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// ///
/// Use `.apply_metric_distance` to apply a custom norm. /// Use `.apply_metric_distance` to apply a custom norm.
#[inline] #[inline]
pub fn metric_distance<R2, C2, S2>(&self, rhs: &Matrix<N, R2, C2, S2>) -> N pub fn metric_distance<R2, C2, S2>(&self, rhs: &Matrix<N, R2, C2, S2>) -> N::Real
where R2: Dim, C2: Dim, S2: Storage<N, R2, C2>, where R2: Dim, C2: Dim, S2: Storage<N, R2, C2>,
ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2> { ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2> {
self.apply_metric_distance(rhs, &EuclideanNorm) self.apply_metric_distance(rhs, &EuclideanNorm)
@ -139,7 +139,7 @@ impl<N: Real, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// assert_eq!(v.apply_norm(&EuclideanNorm), v.norm()); /// assert_eq!(v.apply_norm(&EuclideanNorm), v.norm());
/// ``` /// ```
#[inline] #[inline]
pub fn apply_norm(&self, norm: &impl Norm<N>) -> N { pub fn apply_norm(&self, norm: &impl Norm<N>) -> N::Real {
norm.norm(self) norm.norm(self)
} }
@ -158,16 +158,10 @@ impl<N: Real, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// assert_eq!(v1.apply_metric_distance(&v2, &EuclideanNorm), (v1 - v2).norm()); /// assert_eq!(v1.apply_metric_distance(&v2, &EuclideanNorm), (v1 - v2).norm());
/// ``` /// ```
#[inline] #[inline]
pub fn apply_metric_distance<R2, C2, S2>(&self, rhs: &Matrix<N, R2, C2, S2>, norm: &impl Norm<N>) -> N pub fn apply_metric_distance<R2, C2, S2>(&self, rhs: &Matrix<N, R2, C2, S2>, norm: &impl Norm<N>) -> N::Real
where R2: Dim, C2: Dim, S2: Storage<N, R2, C2>, where R2: Dim, C2: Dim, S2: Storage<N, R2, C2>,
ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2> { ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2> {
norm.metric_distance(self,rhs) norm.metric_distance(self, rhs)
}
/// The Lp norm of this matrix.
#[inline]
pub fn lp_norm(&self, p: i32) -> N {
self.apply_norm(&LpNorm(p))
} }
/// A synonym for the norm of this matrix. /// A synonym for the norm of this matrix.
@ -176,7 +170,7 @@ impl<N: Real, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// ///
/// This function is simply implemented as a call to `norm()` /// This function is simply implemented as a call to `norm()`
#[inline] #[inline]
pub fn magnitude(&self) -> N { pub fn magnitude(&self) -> N::Real {
self.norm() self.norm()
} }
@ -186,7 +180,7 @@ impl<N: Real, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// ///
/// This function is simply implemented as a call to `norm_squared()` /// This function is simply implemented as a call to `norm_squared()`
#[inline] #[inline]
pub fn magnitude_squared(&self) -> N { pub fn magnitude_squared(&self) -> N::Real {
self.norm_squared() self.norm_squared()
} }
@ -194,29 +188,36 @@ impl<N: Real, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
#[inline] #[inline]
pub fn normalize(&self) -> MatrixMN<N, R, C> pub fn normalize(&self) -> MatrixMN<N, R, C>
where DefaultAllocator: Allocator<N, R, C> { where DefaultAllocator: Allocator<N, R, C> {
self / self.norm() self.map(|e| e.unscale(self.norm()))
} }
/// Returns a normalized version of this matrix unless its norm is smaller than or equal to `min_norm`. /// Returns a normalized version of this matrix unless its norm is smaller than or equal to `min_norm`.
#[inline] #[inline]
pub fn try_normalize(&self, min_norm: N) -> Option<MatrixMN<N, R, C>> pub fn try_normalize(&self, min_norm: N::Real) -> Option<MatrixMN<N, R, C>>
where DefaultAllocator: Allocator<N, R, C> { where DefaultAllocator: Allocator<N, R, C> {
let n = self.norm(); let n = self.norm();
if n <= min_norm { if n <= min_norm {
None None
} else { } else {
Some(self / n) Some(self.map(|e| e.unscale(n)))
} }
} }
/// The Lp norm of this matrix.
#[inline]
pub fn lp_norm(&self, p: i32) -> N::Real {
self.apply_norm(&LpNorm(p))
}
} }
impl<N: Real, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
impl<N: Complex, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
/// Normalizes this matrix in-place and returns its norm. /// Normalizes this matrix in-place and returns its norm.
#[inline] #[inline]
pub fn normalize_mut(&mut self) -> N { pub fn normalize_mut(&mut self) -> N::Real {
let n = self.norm(); let n = self.norm();
*self /= n; self.apply(|e| e.unscale(n));
n n
} }
@ -225,13 +226,13 @@ impl<N: Real, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
/// ///
/// If the normalization succeeded, returns the old norm of this matrix. /// If the normalization succeeded, returns the old norm of this matrix.
#[inline] #[inline]
pub fn try_normalize_mut(&mut self, min_norm: N) -> Option<N> { pub fn try_normalize_mut(&mut self, min_norm: N::Real) -> Option<N::Real> {
let n = self.norm(); let n = self.norm();
if n <= min_norm { if n <= min_norm {
None None
} else { } else {
*self /= n; self.apply(|e| e.unscale(n));
Some(n) Some(n)
} }
} }

View File

@ -1,8 +1,9 @@
use ::{Real, Dim, Matrix, VectorN, RowVectorN, DefaultAllocator, U1, VectorSliceN}; use ::{Scalar, Dim, Matrix, VectorN, RowVectorN, DefaultAllocator, U1, VectorSliceN};
use alga::general::{Field, SupersetOf};
use storage::Storage; use storage::Storage;
use allocator::Allocator; use allocator::Allocator;
impl<N: Real, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> { impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Returns a row vector where each element is the result of the application of `f` on the /// Returns a row vector where each element is the result of the application of `f` on the
/// corresponding column of the original matrix. /// corresponding column of the original matrix.
#[inline] #[inline]
@ -53,7 +54,7 @@ impl<N: Real, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
} }
} }
impl<N: Real, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> { impl<N: Scalar + Field + SupersetOf<f64>, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/* /*
* *
* Sum computation. * Sum computation.

View File

@ -10,7 +10,7 @@ use serde::{Deserialize, Deserializer, Serialize, Serializer};
#[cfg(feature = "abomonation-serialize")] #[cfg(feature = "abomonation-serialize")]
use abomonation::Abomonation; use abomonation::Abomonation;
use alga::general::SubsetOf; use alga::general::{SubsetOf, Complex};
use alga::linear::NormedSpace; use alga::linear::NormedSpace;
use ::Real; use ::Real;
@ -66,13 +66,13 @@ impl<T: NormedSpace> Unit<T> {
/// ///
/// Returns `None` if the norm was smaller or equal to `min_norm`. /// Returns `None` if the norm was smaller or equal to `min_norm`.
#[inline] #[inline]
pub fn try_new(value: T, min_norm: T::Field) -> Option<Self> { pub fn try_new(value: T, min_norm: T::Real) -> Option<Self> {
Self::try_new_and_get(value, min_norm).map(|res| res.0) Self::try_new_and_get(value, min_norm).map(|res| res.0)
} }
/// Normalize the given value and return it wrapped on a `Unit` structure and its norm. /// Normalize the given value and return it wrapped on a `Unit` structure and its norm.
#[inline] #[inline]
pub fn new_and_get(mut value: T) -> (Self, T::Field) { pub fn new_and_get(mut value: T) -> (Self, T::Real) {
let n = value.normalize_mut(); let n = value.normalize_mut();
(Unit { value: value }, n) (Unit { value: value }, n)
@ -82,7 +82,7 @@ impl<T: NormedSpace> Unit<T> {
/// ///
/// Returns `None` if the norm was smaller or equal to `min_norm`. /// Returns `None` if the norm was smaller or equal to `min_norm`.
#[inline] #[inline]
pub fn try_new_and_get(mut value: T, min_norm: T::Field) -> Option<(Self, T::Field)> { pub fn try_new_and_get(mut value: T, min_norm: T::Real) -> Option<(Self, T::Real)> {
if let Some(n) = value.try_normalize_mut(min_norm) { if let Some(n) = value.try_normalize_mut(min_norm) {
Some((Unit { value: value }, n)) Some((Unit { value: value }, n))
} else { } else {
@ -96,7 +96,7 @@ impl<T: NormedSpace> Unit<T> {
/// Returns the norm before re-normalization. See `.renormalize_fast` for a faster alternative /// Returns the norm before re-normalization. See `.renormalize_fast` for a faster alternative
/// that may be slightly less accurate if `self` drifted significantly from having a unit length. /// that may be slightly less accurate if `self` drifted significantly from having a unit length.
#[inline] #[inline]
pub fn renormalize(&mut self) -> T::Field { pub fn renormalize(&mut self) -> T::Real {
self.value.normalize_mut() self.value.normalize_mut()
} }
@ -104,12 +104,11 @@ impl<T: NormedSpace> Unit<T> {
/// This is useful when repeated computations might cause a drift in the norm /// This is useful when repeated computations might cause a drift in the norm
/// because of float inaccuracies. /// because of float inaccuracies.
#[inline] #[inline]
pub fn renormalize_fast(&mut self) pub fn renormalize_fast(&mut self) {
where T::Field: Real {
let sq_norm = self.value.norm_squared(); let sq_norm = self.value.norm_squared();
let _3: T::Field = ::convert(3.0); let _3: T::Real = ::convert(3.0);
let _0_5: T::Field = ::convert(0.5); let _0_5: T::Real = ::convert(0.5);
self.value *= _0_5 * (_3 - sq_norm); self.value *= T::Complex::from_real(_0_5 * (_3 - sq_norm));
} }
} }

View File

@ -118,6 +118,9 @@ impl<N: Real> FiniteDimVectorSpace for Quaternion<N> {
} }
impl<N: Real> NormedSpace for Quaternion<N> { impl<N: Real> NormedSpace for Quaternion<N> {
type Real = N;
type Complex = N;
#[inline] #[inline]
fn norm_squared(&self) -> N { fn norm_squared(&self) -> N {
self.coords.norm_squared() self.coords.norm_squared()

View File

@ -160,7 +160,7 @@ use alga::linear::SquareMatrix as AlgaSquareMatrix;
use alga::linear::{EuclideanSpace, FiniteDimVectorSpace, InnerSpace, NormedSpace}; use alga::linear::{EuclideanSpace, FiniteDimVectorSpace, InnerSpace, NormedSpace};
use num::Signed; use num::Signed;
pub use alga::general::{Id, Real}; pub use alga::general::{Id, Real, Complex};
/* /*
* *
@ -481,7 +481,7 @@ pub fn angle<V: InnerSpace>(a: &V, b: &V) -> V::Real {
/// Or, use [NormedSpace::norm](https://docs.rs/alga/0.7.2/alga/linear/trait.NormedSpace.html#tymethod.norm). /// Or, use [NormedSpace::norm](https://docs.rs/alga/0.7.2/alga/linear/trait.NormedSpace.html#tymethod.norm).
#[deprecated(note = "use `Matrix::norm` or `Quaternion::norm` instead")] #[deprecated(note = "use `Matrix::norm` or `Quaternion::norm` instead")]
#[inline] #[inline]
pub fn norm<V: NormedSpace>(v: &V) -> V::Field { pub fn norm<V: NormedSpace>(v: &V) -> V::Real {
v.norm() v.norm()
} }
@ -501,7 +501,7 @@ pub fn norm<V: NormedSpace>(v: &V) -> V::Field {
/// Or, use [NormedSpace::norm_squared](https://docs.rs/alga/0.7.2/alga/linear/trait.NormedSpace.html#tymethod.norm_squared). /// Or, use [NormedSpace::norm_squared](https://docs.rs/alga/0.7.2/alga/linear/trait.NormedSpace.html#tymethod.norm_squared).
#[deprecated(note = "use `Matrix::norm_squared` or `Quaternion::norm_squared` instead")] #[deprecated(note = "use `Matrix::norm_squared` or `Quaternion::norm_squared` instead")]
#[inline] #[inline]
pub fn norm_squared<V: NormedSpace>(v: &V) -> V::Field { pub fn norm_squared<V: NormedSpace>(v: &V) -> V::Real {
v.norm_squared() v.norm_squared()
} }
@ -521,7 +521,7 @@ pub fn norm_squared<V: NormedSpace>(v: &V) -> V::Field {
/// Or, use [NormedSpace::norm](https://docs.rs/alga/0.7.2/alga/linear/trait.NormedSpace.html#tymethod.norm). /// Or, use [NormedSpace::norm](https://docs.rs/alga/0.7.2/alga/linear/trait.NormedSpace.html#tymethod.norm).
#[deprecated(note = "use `Matrix::magnitude` or `Quaternion::magnitude` instead")] #[deprecated(note = "use `Matrix::magnitude` or `Quaternion::magnitude` instead")]
#[inline] #[inline]
pub fn magnitude<V: NormedSpace>(v: &V) -> V::Field { pub fn magnitude<V: NormedSpace>(v: &V) -> V::Real {
v.norm() v.norm()
} }
@ -542,7 +542,7 @@ pub fn magnitude<V: NormedSpace>(v: &V) -> V::Field {
/// Or, use [NormedSpace::norm_squared](https://docs.rs/alga/0.7.2/alga/linear/trait.NormedSpace.html#tymethod.norm_squared). /// Or, use [NormedSpace::norm_squared](https://docs.rs/alga/0.7.2/alga/linear/trait.NormedSpace.html#tymethod.norm_squared).
#[deprecated(note = "use `Matrix::magnitude_squared` or `Quaternion::magnitude_squared` instead")] #[deprecated(note = "use `Matrix::magnitude_squared` or `Quaternion::magnitude_squared` instead")]
#[inline] #[inline]
pub fn magnitude_squared<V: NormedSpace>(v: &V) -> V::Field { pub fn magnitude_squared<V: NormedSpace>(v: &V) -> V::Real {
v.norm_squared() v.norm_squared()
} }
@ -570,7 +570,7 @@ pub fn normalize<V: NormedSpace>(v: &V) -> V {
/// Or, use [NormedSpace::try_normalize](https://docs.rs/alga/0.7.2/alga/linear/trait.NormedSpace.html#tymethod.try_normalize). /// Or, use [NormedSpace::try_normalize](https://docs.rs/alga/0.7.2/alga/linear/trait.NormedSpace.html#tymethod.try_normalize).
#[deprecated(note = "use `Matrix::try_normalize` or `Quaternion::try_normalize` instead")] #[deprecated(note = "use `Matrix::try_normalize` or `Quaternion::try_normalize` instead")]
#[inline] #[inline]
pub fn try_normalize<V: NormedSpace>(v: &V, min_norm: V::Field) -> Option<V> { pub fn try_normalize<V: NormedSpace>(v: &V, min_norm: V::Real) -> Option<V> {
v.try_normalize(min_norm) v.try_normalize(min_norm)
} }