Use Complex instead of Real whenever possible on the base/ module.
commit 7c91f2eeb5 (parent 9d08fdcc21)

src/base/blas.rs | 107
@@ -1,4 +1,4 @@
-use alga::general::{ClosedAdd, ClosedMul};
+use alga::general::{ClosedAdd, ClosedMul, Complex};
 #[cfg(feature = "std")]
 use matrixmultiply;
 use num::{One, Signed, Zero};
@@ -190,6 +190,111 @@ impl<N: Scalar + PartialOrd + Signed, R: Dim, C: Dim, S: Storage<N, R, C>> Matri
     }
 }
 
+impl<N: Complex, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
+    /// The dot product between two complex or real vectors or matrices (seen as vectors).
+    ///
+    /// This is the same as `.dot` except that the conjugate of each component of `self` is taken
+    /// before performing the products.
+    #[inline]
+    pub fn cdot<R2: Dim, C2: Dim, SB>(&self, rhs: &Matrix<N, R2, C2, SB>) -> N
+    where
+        SB: Storage<N, R2, C2>,
+        ShapeConstraint: DimEq<R, R2> + DimEq<C, C2>,
+    {
+        assert!(
+            self.nrows() == rhs.nrows(),
+            "Dot product dimensions mismatch."
+        );
+
+        // So we do some special cases for common fixed-size vectors of dimension lower than 8
+        // because the `for` loop below won't be very efficient on those.
+        if (R::is::<U2>() || R2::is::<U2>()) && (C::is::<U1>() || C2::is::<U1>()) {
+            unsafe {
+                let a = self.get_unchecked((0, 0)).conjugate() * *rhs.get_unchecked((0, 0));
+                let b = self.get_unchecked((1, 0)).conjugate() * *rhs.get_unchecked((1, 0));
+
+                return a + b;
+            }
+        }
+        if (R::is::<U3>() || R2::is::<U3>()) && (C::is::<U1>() || C2::is::<U1>()) {
+            unsafe {
+                let a = self.get_unchecked((0, 0)).conjugate() * *rhs.get_unchecked((0, 0));
+                let b = self.get_unchecked((1, 0)).conjugate() * *rhs.get_unchecked((1, 0));
+                let c = self.get_unchecked((2, 0)).conjugate() * *rhs.get_unchecked((2, 0));
+
+                return a + b + c;
+            }
+        }
+        if (R::is::<U4>() || R2::is::<U4>()) && (C::is::<U1>() || C2::is::<U1>()) {
+            unsafe {
+                let mut a = self.get_unchecked((0, 0)).conjugate() * *rhs.get_unchecked((0, 0));
+                let mut b = self.get_unchecked((1, 0)).conjugate() * *rhs.get_unchecked((1, 0));
+                let c = self.get_unchecked((2, 0)).conjugate() * *rhs.get_unchecked((2, 0));
+                let d = self.get_unchecked((3, 0)).conjugate() * *rhs.get_unchecked((3, 0));
+
+                a += c;
+                b += d;
+
+                return a + b;
+            }
+        }
+
+        // All this is inspired from the "unrolled version" discussed in:
+        // http://blog.theincredibleholk.org/blog/2012/12/10/optimizing-dot-product/
+        //
+        // And this comment from bluss:
+        // https://users.rust-lang.org/t/how-to-zip-two-slices-efficiently/2048/12
+        let mut res = N::zero();
+
+        // We have to define them outside of the loop (and not inside at first assignment)
+        // otherwise vectorization won't kick in for some reason.
+        let mut acc0;
+        let mut acc1;
+        let mut acc2;
+        let mut acc3;
+        let mut acc4;
+        let mut acc5;
+        let mut acc6;
+        let mut acc7;
+
+        for j in 0..self.ncols() {
+            let mut i = 0;
+
+            acc0 = N::zero();
+            acc1 = N::zero();
+            acc2 = N::zero();
+            acc3 = N::zero();
+            acc4 = N::zero();
+            acc5 = N::zero();
+            acc6 = N::zero();
+            acc7 = N::zero();
+
+            while self.nrows() - i >= 8 {
+                acc0 += unsafe { self.get_unchecked((i + 0, j)).conjugate() * *rhs.get_unchecked((i + 0, j)) };
+                acc1 += unsafe { self.get_unchecked((i + 1, j)).conjugate() * *rhs.get_unchecked((i + 1, j)) };
+                acc2 += unsafe { self.get_unchecked((i + 2, j)).conjugate() * *rhs.get_unchecked((i + 2, j)) };
+                acc3 += unsafe { self.get_unchecked((i + 3, j)).conjugate() * *rhs.get_unchecked((i + 3, j)) };
+                acc4 += unsafe { self.get_unchecked((i + 4, j)).conjugate() * *rhs.get_unchecked((i + 4, j)) };
+                acc5 += unsafe { self.get_unchecked((i + 5, j)).conjugate() * *rhs.get_unchecked((i + 5, j)) };
+                acc6 += unsafe { self.get_unchecked((i + 6, j)).conjugate() * *rhs.get_unchecked((i + 6, j)) };
+                acc7 += unsafe { self.get_unchecked((i + 7, j)).conjugate() * *rhs.get_unchecked((i + 7, j)) };
+                i += 8;
+            }
+
+            res += acc0 + acc4;
+            res += acc1 + acc5;
+            res += acc2 + acc6;
+            res += acc3 + acc7;
+
+            for k in i..self.nrows() {
+                res += unsafe { self.get_unchecked((k, j)).conjugate() * *rhs.get_unchecked((k, j)) }
+            }
+        }
+
+        res
+    }
+}
+
 impl<N, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S>
 where N: Scalar + Zero + ClosedAdd + ClosedMul
 {
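The hunk above introduces `cdot`, a conjugate-linear counterpart of `dot`. The following is a minimal usage sketch, not part of the commit: it assumes the `num-complex` crate and that `num_complex::Complex<f64>` implements the new `alga::general::Complex` trait, and the vector values are purely illustrative.

```rust
extern crate nalgebra as na;
extern crate num_complex;

use na::Vector2;
use num_complex::Complex;

fn main() {
    let u = Vector2::new(Complex::new(1.0f64, 2.0), Complex::new(0.0, -1.0));
    let v = Vector2::new(Complex::new(3.0f64, 0.0), Complex::new(1.0, 1.0));

    // `dot` multiplies the components directly; `cdot` conjugates the
    // components of `self` first, giving the Hermitian inner product.
    let plain = u.dot(&v);
    let hermitian = u.cdot(&v);
    println!("dot = {}, cdot = {}", plain, hermitian);

    // For any vector, the real part of `u.cdot(&u)` is its squared norm.
    assert!((u.cdot(&u).re - u.norm_squared()).abs() < 1.0e-12);
}
```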
@@ -1,5 +1,4 @@
 use num::{One, Zero};
-use num_complex::Complex;
 #[cfg(feature = "abomonation-serialize")]
 use std::io::{Result as IOResult, Write};
 
@@ -17,7 +16,7 @@ use serde::{Deserialize, Deserializer, Serialize, Serializer};
 #[cfg(feature = "abomonation-serialize")]
 use abomonation::Abomonation;
 
-use alga::general::{ClosedAdd, ClosedMul, ClosedSub, Real, Ring};
+use alga::general::{ClosedAdd, ClosedMul, ClosedSub, Real, Ring, Complex, Field};
 
 use base::allocator::{Allocator, SameShapeAllocator, SameShapeC, SameShapeR};
 use base::constraint::{DimEq, SameNumberOfColumns, SameNumberOfRows, ShapeConstraint};
@@ -906,14 +905,14 @@ impl<N: Scalar, D: Dim, S: StorageMut<N, D, D>> Matrix<N, D, D, S> {
     }
 }
 
-impl<N: Real, R: Dim, C: Dim, S: Storage<Complex<N>, R, C>> Matrix<Complex<N>, R, C, S> {
+impl<N: Complex, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
     /// Takes the conjugate and transposes `self` and store the result into `out`.
     #[inline]
-    pub fn conjugate_transpose_to<R2, C2, SB>(&self, out: &mut Matrix<Complex<N>, R2, C2, SB>)
+    pub fn conjugate_transpose_to<R2, C2, SB>(&self, out: &mut Matrix<N, R2, C2, SB>)
     where
         R2: Dim,
         C2: Dim,
-        SB: StorageMut<Complex<N>, R2, C2>,
+        SB: StorageMut<N, R2, C2>,
         ShapeConstraint: SameNumberOfRows<R, C2> + SameNumberOfColumns<C, R2>,
     {
         let (nrows, ncols) = self.shape();
@@ -926,7 +925,7 @@ impl<N: Real, R: Dim, C: Dim, S: Storage<Complex<N>, R, C>> Matrix<Complex<N>, R
         for i in 0..nrows {
             for j in 0..ncols {
                 unsafe {
-                    *out.get_unchecked_mut((j, i)) = self.get_unchecked((i, j)).conj();
+                    *out.get_unchecked_mut((j, i)) = self.get_unchecked((i, j)).conjugate();
                 }
             }
         }
@@ -934,8 +933,8 @@ impl<N: Real, R: Dim, C: Dim, S: Storage<Complex<N>, R, C>> Matrix<Complex<N>, R
 
     /// The conjugate transposition of `self`.
     #[inline]
-    pub fn conjugate_transpose(&self) -> MatrixMN<Complex<N>, C, R>
-    where DefaultAllocator: Allocator<Complex<N>, C, R> {
+    pub fn conjugate_transpose(&self) -> MatrixMN<N, C, R>
+    where DefaultAllocator: Allocator<N, C, R> {
         let (nrows, ncols) = self.data.shape();
 
         unsafe {
@@ -947,7 +946,7 @@ impl<N: Real, R: Dim, C: Dim, S: Storage<Complex<N>, R, C>> Matrix<Complex<N>, R
     }
 }
 
-impl<N: Real, D: Dim, S: StorageMut<Complex<N>, D, D>> Matrix<Complex<N>, D, D, S> {
+impl<N: Complex, D: Dim, S: StorageMut<N, D, D>> Matrix<N, D, D, S> {
     /// Sets `self` to its conjugate transpose.
     pub fn conjugate_transpose_mut(&mut self) {
         assert!(
@@ -960,10 +959,10 @@ impl<N: Real, D: Dim, S: StorageMut<Complex<N>, D, D>> Matrix<Complex<N>, D, D,
         for i in 1..dim {
             for j in 0..i {
                 unsafe {
-                    let ref_ij = self.get_unchecked_mut((i, j)) as *mut Complex<N>;
-                    let ref_ji = self.get_unchecked_mut((j, i)) as *mut Complex<N>;
-                    let conj_ij = (*ref_ij).conj();
-                    let conj_ji = (*ref_ji).conj();
+                    let ref_ij = self.get_unchecked_mut((i, j)) as *mut N;
+                    let ref_ji = self.get_unchecked_mut((j, i)) as *mut N;
+                    let conj_ij = (*ref_ij).conjugate();
+                    let conj_ji = (*ref_ji).conjugate();
                     *ref_ij = conj_ji;
                     *ref_ji = conj_ij;
                 }
@@ -1407,7 +1406,7 @@ impl<N: Scalar + Ring, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
     }
 }
 
-impl<N: Real, S: Storage<N, U3>> Vector<N, U3, S>
+impl<N: Scalar + Field, S: Storage<N, U3>> Vector<N, U3, S>
 where DefaultAllocator: Allocator<N, U3>
 {
     /// Computes the matrix `M` such that for all vector `v` we have `M * v == self.cross(&v)`.
@@ -1427,27 +1426,27 @@ where DefaultAllocator: Allocator<N, U3>
     }
 }
 
-impl<N: Real, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
+impl<N: Complex, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
     /// The smallest angle between two vectors.
     #[inline]
-    pub fn angle<R2: Dim, C2: Dim, SB>(&self, other: &Matrix<N, R2, C2, SB>) -> N
+    pub fn angle<R2: Dim, C2: Dim, SB>(&self, other: &Matrix<N, R2, C2, SB>) -> N::Real
    where
        SB: Storage<N, R2, C2>,
        ShapeConstraint: DimEq<R, R2> + DimEq<C, C2>,
    {
-        let prod = self.dot(other);
+        let prod = self.cdot(other);
        let n1 = self.norm();
        let n2 = other.norm();
 
        if n1.is_zero() || n2.is_zero() {
-            N::zero()
+            N::Real::zero()
        } else {
-            let cang = prod / (n1 * n2);
+            let cang = prod.real() / (n1 * n2);
 
-            if cang > N::one() {
-                N::zero()
-            } else if cang < -N::one() {
-                N::pi()
+            if cang > N::Real::one() {
+                N::Real::zero()
+            } else if cang < -N::Real::one() {
+                N::Real::pi()
            } else {
                cang.acos()
            }
@@ -1478,18 +1477,18 @@ impl<N: Scalar + Zero + One + ClosedAdd + ClosedSub + ClosedMul, D: Dim, S: Stor
     }
 }
 
-impl<N: Real, D: Dim, S: Storage<N, D>> Unit<Vector<N, D, S>> {
+impl<N: Complex, D: Dim, S: Storage<N, D>> Unit<Vector<N, D, S>> {
     /// Computes the spherical linear interpolation between two unit vectors.
     pub fn slerp<S2: Storage<N, D>>(
         &self,
         rhs: &Unit<Vector<N, D, S2>>,
-        t: N,
+        t: N::Real,
     ) -> Unit<VectorN<N, D>>
     where
         DefaultAllocator: Allocator<N, D>,
     {
         // FIXME: the result is wrong when self and rhs are collinear with opposite direction.
-        self.try_slerp(rhs, t, N::default_epsilon())
+        self.try_slerp(rhs, t, N::Real::default_epsilon())
             .unwrap_or(Unit::new_unchecked(self.clone_owned()))
     }
 
@@ -1500,29 +1499,29 @@ impl<N: Real, D: Dim, S: Storage<N, D>> Unit<Vector<N, D, S>> {
     pub fn try_slerp<S2: Storage<N, D>>(
         &self,
         rhs: &Unit<Vector<N, D, S2>>,
-        t: N,
-        epsilon: N,
+        t: N::Real,
+        epsilon: N::Real,
     ) -> Option<Unit<VectorN<N, D>>>
     where
         DefaultAllocator: Allocator<N, D>,
     {
-        let c_hang = self.dot(rhs);
+        let c_hang = self.cdot(rhs).real();
 
         // self == other
-        if c_hang.abs() >= N::one() {
+        if c_hang.abs() >= N::Real::one() {
             return Some(Unit::new_unchecked(self.clone_owned()));
         }
 
         let hang = c_hang.acos();
-        let s_hang = (N::one() - c_hang * c_hang).sqrt();
+        let s_hang = (N::Real::one() - c_hang * c_hang).sqrt();
 
         // FIXME: what if s_hang is 0.0 ? The result is not well-defined.
-        if relative_eq!(s_hang, N::zero(), epsilon = epsilon) {
+        if relative_eq!(s_hang, N::Real::zero(), epsilon = epsilon) {
             None
         } else {
-            let ta = ((N::one() - t) * hang).sin() / s_hang;
+            let ta = ((N::Real::one() - t) * hang).sin() / s_hang;
             let tb = (t * hang).sin() / s_hang;
-            let res = &**self * ta + &**rhs * tb;
+            let res = &**self * N::from_real(ta) + &**rhs * N::from_real(tb);
 
             Some(Unit::new_unchecked(res))
         }
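Following the `angle` change above, the angle between complex vectors is derived from the real part of `cdot` and is always returned as the real type. A small illustrative sketch, with the same assumptions as before (the `num-complex` crate and the alga `Complex` impl for `num_complex::Complex<f64>`); the vectors are chosen only to make the expected angle obvious:

```rust
extern crate nalgebra as na;
extern crate num_complex;

use na::Vector2;
use num_complex::Complex;

fn main() {
    let u = Vector2::new(Complex::new(1.0f64, 0.0), Complex::new(0.0, 1.0));
    let v = Vector2::new(Complex::new(0.0f64, 1.0), Complex::new(1.0, 0.0));

    // `angle` now returns `N::Real` (`f64` here), computed as
    // acos(Re(u.cdot(&v)) / (u.norm() * v.norm())).
    let theta: f64 = u.angle(&v);
    assert!((theta - std::f64::consts::FRAC_PI_2).abs() < 1.0e-12);
}
```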
@@ -7,7 +7,7 @@ use alga::general::{
     AbstractGroup, AbstractGroupAbelian, AbstractLoop, AbstractMagma, AbstractModule,
     AbstractMonoid, AbstractQuasigroup, AbstractSemigroup, Additive, ClosedAdd, ClosedMul,
     ClosedNeg, Field, Identity, TwoSidedInverse, JoinSemilattice, Lattice, MeetSemilattice, Module,
-    Multiplicative, Real, RingCommutative,
+    Multiplicative, Real, RingCommutative, Complex
 };
 use alga::linear::{
     FiniteDimInnerSpace, FiniteDimVectorSpace, InnerSpace, NormedSpace, VectorSpace,
@@ -145,16 +145,19 @@
     }
 }
 
-impl<N: Real, R: DimName, C: DimName> NormedSpace for MatrixMN<N, R, C>
+impl<N: Complex, R: DimName, C: DimName> NormedSpace for MatrixMN<N, R, C>
 where DefaultAllocator: Allocator<N, R, C>
 {
+    type Real = N::Real;
+    type Complex = N;
+
     #[inline]
-    fn norm_squared(&self) -> N {
+    fn norm_squared(&self) -> N::Real {
         self.norm_squared()
     }
 
     #[inline]
-    fn norm(&self) -> N {
+    fn norm(&self) -> N::Real {
         self.norm()
     }
 
@@ -164,34 +167,32 @@ where DefaultAllocator: Allocator<N, R, C>
     }
 
     #[inline]
-    fn normalize_mut(&mut self) -> N {
+    fn normalize_mut(&mut self) -> N::Real {
         self.normalize_mut()
     }
 
     #[inline]
-    fn try_normalize(&self, min_norm: N) -> Option<Self> {
+    fn try_normalize(&self, min_norm: N::Real) -> Option<Self> {
         self.try_normalize(min_norm)
     }
 
     #[inline]
-    fn try_normalize_mut(&mut self, min_norm: N) -> Option<N> {
+    fn try_normalize_mut(&mut self, min_norm: N::Real) -> Option<N::Real> {
         self.try_normalize_mut(min_norm)
     }
 }
 
-impl<N: Real, R: DimName, C: DimName> InnerSpace for MatrixMN<N, R, C>
+impl<N: Complex, R: DimName, C: DimName> InnerSpace for MatrixMN<N, R, C>
 where DefaultAllocator: Allocator<N, R, C>
 {
-    type Real = N;
-
     #[inline]
-    fn angle(&self, other: &Self) -> N {
+    fn angle(&self, other: &Self) -> N::Real {
         self.angle(other)
     }
 
     #[inline]
     fn inner_product(&self, other: &Self) -> N {
-        self.dot(other)
+        self.cdot(other)
     }
 }
 
@@ -199,7 +200,7 @@ where DefaultAllocator: Allocator<N, R, C>
 // In particular:
 // − use `x()` instead of `::canonical_basis_element`
 // − use `::new(x, y, z)` instead of `::from_slice`
-impl<N: Real, R: DimName, C: DimName> FiniteDimInnerSpace for MatrixMN<N, R, C>
+impl<N: Complex, R: DimName, C: DimName> FiniteDimInnerSpace for MatrixMN<N, R, C>
 where DefaultAllocator: Allocator<N, R, C>
 {
     #[inline]
@@ -215,7 +216,7 @@ where DefaultAllocator: Allocator<N, R, C>
                 }
             }
 
-            if vs[i].try_normalize_mut(N::zero()).is_some() {
+            if vs[i].try_normalize_mut(N::Real::zero()).is_some() {
                 // FIXME: this will be efficient on dynamically-allocated vectors but for
                 // statically-allocated ones, `.clone_from` would be better.
                 vs.swap(nbasis_elements, i);
@@ -268,7 +269,7 @@ where DefaultAllocator: Allocator<N, R, C>
                 let v = &vs[0];
                 let mut a;
 
-                if v[0].abs() > v[1].abs() {
+                if v[0].modulus() > v[1].modulus() {
                     a = Self::from_column_slice(&[v[2], N::zero(), -v[0]]);
                 } else {
                     a = Self::from_column_slice(&[N::zero(), -v[2], v[1]]);
@@ -300,7 +301,7 @@ where DefaultAllocator: Allocator<N, R, C>
                     elt -= v * elt.dot(v)
                 }
 
-                if let Some(subsp_elt) = elt.try_normalize(N::zero()) {
+                if let Some(subsp_elt) = elt.try_normalize(N::Real::zero()) {
                     if !f(&subsp_elt) {
                         return;
                     };
@@ -1,8 +1,8 @@
-use num::Signed;
+use num::{Signed, Zero};
 use std::cmp::PartialOrd;
 
 use allocator::Allocator;
-use ::{Real, Scalar};
+use ::{Real, Complex, Scalar};
 use storage::{Storage, StorageMut};
 use base::{DefaultAllocator, Matrix, Dim, MatrixMN};
 use constraint::{SameNumberOfRows, SameNumberOfColumns, ShapeConstraint};
@@ -12,12 +12,12 @@ use constraint::{SameNumberOfRows, SameNumberOfColumns, ShapeConstraint};
 /// A trait for abstract matrix norms.
 ///
 /// This may be moved to the alga crate in the future.
-pub trait Norm<N: Scalar> {
+pub trait Norm<N: Complex> {
     /// Apply this norm to the given matrix.
-    fn norm<R, C, S>(&self, m: &Matrix<N, R, C, S>) -> N
+    fn norm<R, C, S>(&self, m: &Matrix<N, R, C, S>) -> N::Real
     where R: Dim, C: Dim, S: Storage<N, R, C>;
     /// Use the metric induced by this norm to compute the metric distance between the two given matrices.
-    fn metric_distance<R1, C1, S1, R2, C2, S2>(&self, m1: &Matrix<N, R1, C1, S1>, m2: &Matrix<N, R2, C2, S2>) -> N
+    fn metric_distance<R1, C1, S1, R2, C2, S2>(&self, m1: &Matrix<N, R1, C1, S1>, m2: &Matrix<N, R2, C2, S2>) -> N::Real
     where R1: Dim, C1: Dim, S1: Storage<N, R1, C1>,
           R2: Dim, C2: Dim, S2: Storage<N, R2, C2>,
           ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2>;
@@ -30,60 +30,60 @@ pub struct LpNorm(pub i32);
 /// L-infinite norm aka. Chebytchev norm aka. uniform norm aka. suppremum norm.
 pub struct UniformNorm;
 
-impl<N: Real> Norm<N> for EuclideanNorm {
+impl<N: Complex> Norm<N> for EuclideanNorm {
     #[inline]
-    fn norm<R, C, S>(&self, m: &Matrix<N, R, C, S>) -> N
+    fn norm<R, C, S>(&self, m: &Matrix<N, R, C, S>) -> N::Real
     where R: Dim, C: Dim, S: Storage<N, R, C> {
-        m.norm_squared().sqrt()
+        m.cdot(m).real().sqrt()
     }
 
     #[inline]
-    fn metric_distance<R1, C1, S1, R2, C2, S2>(&self, m1: &Matrix<N, R1, C1, S1>, m2: &Matrix<N, R2, C2, S2>) -> N
+    fn metric_distance<R1, C1, S1, R2, C2, S2>(&self, m1: &Matrix<N, R1, C1, S1>, m2: &Matrix<N, R2, C2, S2>) -> N::Real
     where R1: Dim, C1: Dim, S1: Storage<N, R1, C1>,
           R2: Dim, C2: Dim, S2: Storage<N, R2, C2>,
           ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2> {
-        m1.zip_fold(m2, N::zero(), |acc, a, b| {
+        m1.zip_fold(m2, N::Real::zero(), |acc, a, b| {
             let diff = a - b;
-            acc + diff * diff
+            acc + (diff.conjugate() * diff).real()
         }).sqrt()
     }
 }
 
-impl<N: Real> Norm<N> for LpNorm {
+impl<N: Complex> Norm<N> for LpNorm {
     #[inline]
-    fn norm<R, C, S>(&self, m: &Matrix<N, R, C, S>) -> N
+    fn norm<R, C, S>(&self, m: &Matrix<N, R, C, S>) -> N::Real
     where R: Dim, C: Dim, S: Storage<N, R, C> {
-        m.fold(N::zero(), |a, b| {
-            a + b.abs().powi(self.0)
+        m.fold(N::Real::zero(), |a, b| {
+            a + b.modulus().powi(self.0)
         }).powf(::convert(1.0 / (self.0 as f64)))
     }
 
     #[inline]
-    fn metric_distance<R1, C1, S1, R2, C2, S2>(&self, m1: &Matrix<N, R1, C1, S1>, m2: &Matrix<N, R2, C2, S2>) -> N
+    fn metric_distance<R1, C1, S1, R2, C2, S2>(&self, m1: &Matrix<N, R1, C1, S1>, m2: &Matrix<N, R2, C2, S2>) -> N::Real
     where R1: Dim, C1: Dim, S1: Storage<N, R1, C1>,
           R2: Dim, C2: Dim, S2: Storage<N, R2, C2>,
           ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2> {
-        m1.zip_fold(m2, N::zero(), |acc, a, b| {
+        m1.zip_fold(m2, N::Real::zero(), |acc, a, b| {
             let diff = a - b;
-            acc + diff.abs().powi(self.0)
+            acc + diff.modulus().powi(self.0)
         }).powf(::convert(1.0 / (self.0 as f64)))
     }
 }
 
-impl<N: Scalar + PartialOrd + Signed> Norm<N> for UniformNorm {
+impl<N: Complex> Norm<N> for UniformNorm {
     #[inline]
-    fn norm<R, C, S>(&self, m: &Matrix<N, R, C, S>) -> N
+    fn norm<R, C, S>(&self, m: &Matrix<N, R, C, S>) -> N::Real
     where R: Dim, C: Dim, S: Storage<N, R, C> {
-        m.amax()
+        m.fold(N::Real::zero(), |acc, a| acc.max(a.modulus()))
     }
 
     #[inline]
-    fn metric_distance<R1, C1, S1, R2, C2, S2>(&self, m1: &Matrix<N, R1, C1, S1>, m2: &Matrix<N, R2, C2, S2>) -> N
+    fn metric_distance<R1, C1, S1, R2, C2, S2>(&self, m1: &Matrix<N, R1, C1, S1>, m2: &Matrix<N, R2, C2, S2>) -> N::Real
     where R1: Dim, C1: Dim, S1: Storage<N, R1, C1>,
           R2: Dim, C2: Dim, S2: Storage<N, R2, C2>,
           ShapeConstraint: SameNumberOfRows<R1, R2> + SameNumberOfColumns<C1, C2> {
-        m1.zip_fold(m2, N::zero(), |acc, a, b| {
-            let val = (a - b).abs();
+        m1.zip_fold(m2, N::Real::zero(), |acc, a, b| {
+            let val = (a - b).modulus();
             if val > acc {
                 val
             } else {
@@ -94,15 +94,15 @@ impl<N: Scalar + PartialOrd + Signed> Norm<N> for UniformNorm {
 }
 
 
-impl<N: Real, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
+impl<N: Complex, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
     /// The squared L2 norm of this vector.
     #[inline]
-    pub fn norm_squared(&self) -> N {
-        let mut res = N::zero();
+    pub fn norm_squared(&self) -> N::Real {
+        let mut res = N::Real::zero();
 
         for i in 0..self.ncols() {
             let col = self.column(i);
-            res += col.dot(&col)
+            res += col.cdot(&col).real()
         }
 
         res
@@ -112,7 +112,7 @@ impl<N: Real, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
     ///
     /// Use `.apply_norm` to apply a custom norm.
     #[inline]
-    pub fn norm(&self) -> N {
+    pub fn norm(&self) -> N::Real {
         self.norm_squared().sqrt()
     }
 
@@ -120,7 +120,7 @@ impl<N: Real, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
     ///
     /// Use `.apply_metric_distance` to apply a custom norm.
     #[inline]
-    pub fn metric_distance<R2, C2, S2>(&self, rhs: &Matrix<N, R2, C2, S2>) -> N
+    pub fn metric_distance<R2, C2, S2>(&self, rhs: &Matrix<N, R2, C2, S2>) -> N::Real
     where R2: Dim, C2: Dim, S2: Storage<N, R2, C2>,
           ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2> {
         self.apply_metric_distance(rhs, &EuclideanNorm)
@@ -139,7 +139,7 @@ impl<N: Real, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
     /// assert_eq!(v.apply_norm(&EuclideanNorm), v.norm());
     /// ```
     #[inline]
-    pub fn apply_norm(&self, norm: &impl Norm<N>) -> N {
+    pub fn apply_norm(&self, norm: &impl Norm<N>) -> N::Real {
         norm.norm(self)
     }
 
@@ -158,16 +158,10 @@ impl<N: Real, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
     /// assert_eq!(v1.apply_metric_distance(&v2, &EuclideanNorm), (v1 - v2).norm());
     /// ```
     #[inline]
-    pub fn apply_metric_distance<R2, C2, S2>(&self, rhs: &Matrix<N, R2, C2, S2>, norm: &impl Norm<N>) -> N
+    pub fn apply_metric_distance<R2, C2, S2>(&self, rhs: &Matrix<N, R2, C2, S2>, norm: &impl Norm<N>) -> N::Real
     where R2: Dim, C2: Dim, S2: Storage<N, R2, C2>,
           ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2> {
-        norm.metric_distance(self,rhs)
-    }
-
-    /// The Lp norm of this matrix.
-    #[inline]
-    pub fn lp_norm(&self, p: i32) -> N {
-        self.apply_norm(&LpNorm(p))
+        norm.metric_distance(self, rhs)
     }
 
     /// A synonym for the norm of this matrix.
@@ -176,7 +170,7 @@ impl<N: Real, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
     ///
     /// This function is simply implemented as a call to `norm()`
     #[inline]
-    pub fn magnitude(&self) -> N {
+    pub fn magnitude(&self) -> N::Real {
         self.norm()
     }
 
@@ -186,7 +180,7 @@ impl<N: Real, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
     ///
     /// This function is simply implemented as a call to `norm_squared()`
     #[inline]
-    pub fn magnitude_squared(&self) -> N {
+    pub fn magnitude_squared(&self) -> N::Real {
         self.norm_squared()
     }
 
@@ -194,29 +188,36 @@ impl<N: Real, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
     #[inline]
     pub fn normalize(&self) -> MatrixMN<N, R, C>
     where DefaultAllocator: Allocator<N, R, C> {
-        self / self.norm()
+        self.map(|e| e.unscale(self.norm()))
     }
 
     /// Returns a normalized version of this matrix unless its norm as smaller or equal to `eps`.
     #[inline]
-    pub fn try_normalize(&self, min_norm: N) -> Option<MatrixMN<N, R, C>>
+    pub fn try_normalize(&self, min_norm: N::Real) -> Option<MatrixMN<N, R, C>>
     where DefaultAllocator: Allocator<N, R, C> {
         let n = self.norm();
 
         if n <= min_norm {
             None
         } else {
-            Some(self / n)
+            Some(self.map(|e| e.unscale(n)))
         }
     }
 
+    /// The Lp norm of this matrix.
+    #[inline]
+    pub fn lp_norm(&self, p: i32) -> N::Real {
+        self.apply_norm(&LpNorm(p))
+    }
 }
 
-impl<N: Real, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
+impl<N: Complex, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
     /// Normalizes this matrix in-place and returns its norm.
     #[inline]
-    pub fn normalize_mut(&mut self) -> N {
+    pub fn normalize_mut(&mut self) -> N::Real {
         let n = self.norm();
-        *self /= n;
+        self.apply(|e| e.unscale(n));
 
         n
     }
@@ -225,13 +226,13 @@ impl<N: Real, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
     ///
     /// If the normalization succeeded, returns the old normal of this matrix.
     #[inline]
-    pub fn try_normalize_mut(&mut self, min_norm: N) -> Option<N> {
+    pub fn try_normalize_mut(&mut self, min_norm: N::Real) -> Option<N::Real> {
         let n = self.norm();
 
         if n <= min_norm {
             None
         } else {
-            *self /= n;
+            self.apply(|e| e.unscale(n));
             Some(n)
         }
     }
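With the norm rework above, `norm`, `norm_squared`, and `lp_norm` on a complex matrix return the associated real type and are built from the moduli of the components. A hedged sketch of what that means in practice, under the same crate assumptions as the earlier examples and with values chosen purely for illustration:

```rust
extern crate nalgebra as na;
extern crate num_complex;

use na::Vector2;
use num_complex::Complex;

fn main() {
    let v = Vector2::new(Complex::new(3.0f64, 4.0), Complex::new(0.0, 0.0));

    // The Euclidean norm is the square root of the sum of squared moduli:
    // |3 + 4i| = 5, so the norm is 5.
    let n: f64 = v.norm();
    assert!((n - 5.0).abs() < 1.0e-12);

    // The L1 norm folds the moduli directly.
    let l1: f64 = v.lp_norm(1);
    assert!((l1 - 5.0).abs() < 1.0e-12);
}
```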
@@ -1,8 +1,9 @@
-use ::{Real, Dim, Matrix, VectorN, RowVectorN, DefaultAllocator, U1, VectorSliceN};
+use ::{Scalar, Dim, Matrix, VectorN, RowVectorN, DefaultAllocator, U1, VectorSliceN};
+use alga::general::{Field, SupersetOf};
 use storage::Storage;
 use allocator::Allocator;
 
-impl<N: Real, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
+impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
     /// Returns a row vector where each element is the result of the application of `f` on the
     /// corresponding column of the original matrix.
     #[inline]
@@ -53,7 +54,7 @@ impl<N: Real, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
     }
 }
 
-impl<N: Real, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
+impl<N: Scalar + Field + SupersetOf<f64>, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
     /*
      *
      * Sum computation.
@@ -10,7 +10,7 @@ use serde::{Deserialize, Deserializer, Serialize, Serializer};
 #[cfg(feature = "abomonation-serialize")]
 use abomonation::Abomonation;
 
-use alga::general::SubsetOf;
+use alga::general::{SubsetOf, Complex};
 use alga::linear::NormedSpace;
 
 use ::Real;
@@ -66,13 +66,13 @@ impl<T: NormedSpace> Unit<T> {
     ///
     /// Returns `None` if the norm was smaller or equal to `min_norm`.
     #[inline]
-    pub fn try_new(value: T, min_norm: T::Field) -> Option<Self> {
+    pub fn try_new(value: T, min_norm: T::Real) -> Option<Self> {
         Self::try_new_and_get(value, min_norm).map(|res| res.0)
     }
 
     /// Normalize the given value and return it wrapped on a `Unit` structure and its norm.
     #[inline]
-    pub fn new_and_get(mut value: T) -> (Self, T::Field) {
+    pub fn new_and_get(mut value: T) -> (Self, T::Real) {
         let n = value.normalize_mut();
 
         (Unit { value: value }, n)
@@ -82,7 +82,7 @@ impl<T: NormedSpace> Unit<T> {
     ///
     /// Returns `None` if the norm was smaller or equal to `min_norm`.
     #[inline]
-    pub fn try_new_and_get(mut value: T, min_norm: T::Field) -> Option<(Self, T::Field)> {
+    pub fn try_new_and_get(mut value: T, min_norm: T::Real) -> Option<(Self, T::Real)> {
         if let Some(n) = value.try_normalize_mut(min_norm) {
             Some((Unit { value: value }, n))
         } else {
@@ -96,7 +96,7 @@ impl<T: NormedSpace> Unit<T> {
     /// Returns the norm before re-normalization. See `.renormalize_fast` for a faster alternative
     /// that may be slightly less accurate if `self` drifted significantly from having a unit length.
     #[inline]
-    pub fn renormalize(&mut self) -> T::Field {
+    pub fn renormalize(&mut self) -> T::Real {
         self.value.normalize_mut()
     }
 
@@ -104,12 +104,11 @@ impl<T: NormedSpace> Unit<T> {
     /// This is useful when repeated computations might cause a drift in the norm
     /// because of float inaccuracies.
     #[inline]
-    pub fn renormalize_fast(&mut self)
-    where T::Field: Real {
+    pub fn renormalize_fast(&mut self) {
         let sq_norm = self.value.norm_squared();
-        let _3: T::Field = ::convert(3.0);
-        let _0_5: T::Field = ::convert(0.5);
-        self.value *= _0_5 * (_3 - sq_norm);
+        let _3: T::Real = ::convert(3.0);
+        let _0_5: T::Real = ::convert(0.5);
+        self.value *= T::Complex::from_real(_0_5 * (_3 - sq_norm));
     }
 }
 
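The `Unit` changes above replace `T::Field` with the normed space's real type in the constructors, so norm thresholds stay real even when the underlying vector is complex. A minimal sketch under that assumption (same illustrative crates and values as before):

```rust
extern crate nalgebra as na;
extern crate num_complex;

use na::{Unit, Vector2};
use num_complex::Complex;

fn main() {
    let v = Vector2::new(Complex::new(0.0f64, 3.0), Complex::new(4.0, 0.0));

    // `min_norm` is now `T::Real` (`f64` here) even though the vector is complex.
    let unit = Unit::try_new(v, 1.0e-6).expect("norm is above the threshold");
    assert!((unit.norm() - 1.0).abs() < 1.0e-12);
}
```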
@@ -118,6 +118,9 @@ impl<N: Real> FiniteDimVectorSpace for Quaternion<N> {
 }
 
 impl<N: Real> NormedSpace for Quaternion<N> {
+    type Real = N;
+    type Complex = N;
+
     #[inline]
     fn norm_squared(&self) -> N {
         self.coords.norm_squared()
src/lib.rs | 12
@@ -160,7 +160,7 @@ use alga::linear::SquareMatrix as AlgaSquareMatrix;
 use alga::linear::{EuclideanSpace, FiniteDimVectorSpace, InnerSpace, NormedSpace};
 use num::Signed;
 
-pub use alga::general::{Id, Real};
+pub use alga::general::{Id, Real, Complex};
 
 /*
  *
@@ -481,7 +481,7 @@ pub fn angle<V: InnerSpace>(a: &V, b: &V) -> V::Real {
 /// Or, use [NormedSpace::norm](https://docs.rs/alga/0.7.2/alga/linear/trait.NormedSpace.html#tymethod.norm).
 #[deprecated(note = "use `Matrix::norm` or `Quaternion::norm` instead")]
 #[inline]
-pub fn norm<V: NormedSpace>(v: &V) -> V::Field {
+pub fn norm<V: NormedSpace>(v: &V) -> V::Real {
     v.norm()
 }
 
@@ -501,7 +501,7 @@ pub fn norm<V: NormedSpace>(v: &V) -> V::Field {
 /// Or, use [NormedSpace::norm_squared](https://docs.rs/alga/0.7.2/alga/linear/trait.NormedSpace.html#tymethod.norm_squared).
 #[deprecated(note = "use `Matrix::norm_squared` or `Quaternion::norm_squared` instead")]
 #[inline]
-pub fn norm_squared<V: NormedSpace>(v: &V) -> V::Field {
+pub fn norm_squared<V: NormedSpace>(v: &V) -> V::Real {
     v.norm_squared()
 }
 
@@ -521,7 +521,7 @@ pub fn norm_squared<V: NormedSpace>(v: &V) -> V::Field {
 /// Or, use [NormedSpace::norm](https://docs.rs/alga/0.7.2/alga/linear/trait.NormedSpace.html#tymethod.norm).
 #[deprecated(note = "use `Matrix::magnitude` or `Quaternion::magnitude` instead")]
 #[inline]
-pub fn magnitude<V: NormedSpace>(v: &V) -> V::Field {
+pub fn magnitude<V: NormedSpace>(v: &V) -> V::Real {
     v.norm()
 }
 
@@ -542,7 +542,7 @@ pub fn magnitude<V: NormedSpace>(v: &V) -> V::Field {
 /// Or, use [NormedSpace::norm_squared](https://docs.rs/alga/0.7.2/alga/linear/trait.NormedSpace.html#tymethod.norm_squared).
 #[deprecated(note = "use `Matrix::magnitude_squared` or `Quaternion::magnitude_squared` instead")]
 #[inline]
-pub fn magnitude_squared<V: NormedSpace>(v: &V) -> V::Field {
+pub fn magnitude_squared<V: NormedSpace>(v: &V) -> V::Real {
     v.norm_squared()
 }
 
@@ -570,7 +570,7 @@ pub fn normalize<V: NormedSpace>(v: &V) -> V {
 /// Or, use [NormedSpace::try_normalize](https://docs.rs/alga/0.7.2/alga/linear/trait.NormedSpace.html#tymethod.try_normalize).
 #[deprecated(note = "use `Matrix::try_normalize` or `Quaternion::try_normalize` instead")]
 #[inline]
-pub fn try_normalize<V: NormedSpace>(v: &V, min_norm: V::Field) -> Option<V> {
+pub fn try_normalize<V: NormedSpace>(v: &V, min_norm: V::Real) -> Option<V> {
     v.try_normalize(min_norm)
 }
 