use num::{Zero, One};

use alga::general::{AbstractMagma, AbstractGroupAbelian, AbstractGroup, AbstractLoop,
                    AbstractMonoid, AbstractQuasigroup, AbstractSemigroup, AbstractModule,
                    Module, Field, RingCommutative, Real, Inverse, Additive, Multiplicative,
                    MeetSemilattice, JoinSemilattice, Lattice, Identity,
                    ClosedAdd, ClosedNeg, ClosedMul};
use alga::linear::{VectorSpace, NormedSpace, InnerSpace, FiniteDimVectorSpace, FiniteDimInnerSpace};

use core::{DefaultAllocator, Scalar, MatrixMN, MatrixN};
use core::dimension::{Dim, DimName};
use core::storage::{Storage, StorageMut};
use core::allocator::Allocator;

/*
 *
 * Additive structures.
 *
 */
impl<N, R: DimName, C: DimName> Identity<Additive> for MatrixMN<N, R, C>
    where N: Scalar + Zero,
          DefaultAllocator: Allocator<N, R, C> {
    #[inline]
    fn identity() -> Self {
        Self::from_element(N::zero())
    }
}

impl<N, R: DimName, C: DimName> AbstractMagma<Additive> for MatrixMN<N, R, C>
    where N: Scalar + ClosedAdd,
          DefaultAllocator: Allocator<N, R, C> {
    #[inline]
    fn operate(&self, other: &Self) -> Self {
        self + other
    }
}

impl<N, R: DimName, C: DimName> Inverse<Additive> for MatrixMN<N, R, C>
    where N: Scalar + ClosedNeg,
          DefaultAllocator: Allocator<N, R, C> {
    #[inline]
    fn inverse(&self) -> MatrixMN<N, R, C> {
        -self
    }

    #[inline]
    fn inverse_mut(&mut self) {
        *self = -self.clone()
    }
}

macro_rules! inherit_additive_structure(
    ($($marker: ident<$operator: ident> $(+ $bounds: ident)*),* $(,)*) => {$(
        impl<N, R: DimName, C: DimName> $marker<$operator> for MatrixMN<N, R, C>
            where N: Scalar + $marker<$operator> $(+ $bounds)*,
                  DefaultAllocator: Allocator<N, R, C> { }
    )*}
);

inherit_additive_structure!(
    AbstractSemigroup<Additive>    + ClosedAdd,
    AbstractMonoid<Additive>       + Zero + ClosedAdd,
    AbstractQuasigroup<Additive>   + ClosedAdd + ClosedNeg,
    AbstractLoop<Additive>         + Zero + ClosedAdd + ClosedNeg,
    AbstractGroup<Additive>        + Zero + ClosedAdd + ClosedNeg,
    AbstractGroupAbelian<Additive> + Zero + ClosedAdd + ClosedNeg
);

impl<N, R: DimName, C: DimName> AbstractModule for MatrixMN<N, R, C>
    where N: Scalar + RingCommutative,
          DefaultAllocator: Allocator<N, R, C> {
    type AbstractRing = N;

    #[inline]
    fn multiply_by(&self, n: N) -> Self {
        self * n
    }
}

impl<N, R: DimName, C: DimName> Module for MatrixMN<N, R, C>
    where N: Scalar + RingCommutative,
          DefaultAllocator: Allocator<N, R, C> {
    type Ring = N;
}

impl<N, R: DimName, C: DimName> VectorSpace for MatrixMN<N, R, C>
    where N: Scalar + Field,
          DefaultAllocator: Allocator<N, R, C> {
    type Field = N;
}

impl<N, R: DimName, C: DimName> FiniteDimVectorSpace for MatrixMN<N, R, C>
    where N: Scalar + Field,
          DefaultAllocator: Allocator<N, R, C> {
    #[inline]
    fn dimension() -> usize {
        R::dim() * C::dim()
    }

    #[inline]
    fn canonical_basis_element(i: usize) -> Self {
        assert!(i < Self::dimension(), "Index out of bound.");

        let mut res = Self::zero();
        unsafe { *res.data.get_unchecked_linear_mut(i) = N::one(); }

        res
    }

    #[inline]
    fn dot(&self, other: &Self) -> N {
        self.dot(other)
    }

    #[inline]
    unsafe fn component_unchecked(&self, i: usize) -> &N {
        self.data.get_unchecked_linear(i)
    }

    #[inline]
    unsafe fn component_unchecked_mut(&mut self, i: usize) -> &mut N {
        self.data.get_unchecked_linear_mut(i)
    }
}

impl<N: Real, R: DimName, C: DimName> NormedSpace for MatrixMN<N, R, C>
    where DefaultAllocator: Allocator<N, R, C> {
    #[inline]
    fn norm_squared(&self) -> N {
        self.norm_squared()
    }

    #[inline]
    fn norm(&self) -> N {
        self.norm()
    }

    #[inline]
    fn normalize(&self) -> Self {
        self.normalize()
    }

    #[inline]
    fn normalize_mut(&mut self) -> N {
        self.normalize_mut()
    }

    #[inline]
    fn try_normalize(&self, min_norm: N) -> Option<Self> {
        self.try_normalize(min_norm)
    }

    #[inline]
    fn try_normalize_mut(&mut self, min_norm: N) -> Option<N> {
        self.try_normalize_mut(min_norm)
    }
}

impl<N: Real, R: DimName, C: DimName> InnerSpace for MatrixMN<N, R, C>
    where DefaultAllocator: Allocator<N, R, C> {
    type Real = N;

    #[inline]
    fn angle(&self, other: &Self) -> N {
        self.angle(other)
    }

    #[inline]
    fn inner_product(&self, other: &Self) -> N {
        self.dot(other)
    }
}

// FIXME: specialization will greatly simplify this implementation in the future.
// In particular:
//   − use `x()` instead of `::canonical_basis_element`
//   − use `::new(x, y, z)` instead of `::from_slice`
impl<N: Real, R: DimName, C: DimName> FiniteDimInnerSpace for MatrixMN<N, R, C>
    where DefaultAllocator: Allocator<N, R, C> {
    // In-place Gram-Schmidt: returns the number of orthonormal basis elements kept at the
    // beginning of `vs`.
    #[inline]
    fn orthonormalize(vs: &mut [MatrixMN<N, R, C>]) -> usize {
        let mut nbasis_elements = 0;

        for i in 0 .. vs.len() {
            {
                let (elt, basis) = vs[.. i + 1].split_last_mut().unwrap();

                // Project out the components along the already-accepted basis elements.
                for basis_element in &basis[.. nbasis_elements] {
                    *elt -= &*basis_element * elt.dot(basis_element)
                }
            }

            if vs[i].try_normalize_mut(N::zero()).is_some() {
                // FIXME: this will be efficient on dynamically-allocated vectors but for
                // statically-allocated ones, `.clone_from` would be better.
                vs.swap(nbasis_elements, i);
                nbasis_elements += 1;

                // All the other vectors will be dependent.
                if nbasis_elements == Self::dimension() {
                    break;
                }
            }
        }

        nbasis_elements
    }

    // Applies the closure `f` to the elements of an orthonormal basis of the subspace
    // orthogonal to the free family `vs`, stopping as soon as `f` returns `false`.
    #[inline]
    fn orthonormal_subspace_basis<F>(vs: &[Self], mut f: F)
        where F: FnMut(&Self) -> bool {
        // FIXME: is this necessary?
        assert!(vs.len() <= Self::dimension(),
                "The given set of vectors has no chance of being a free family.");

        match Self::dimension() {
            1 => {
                if vs.len() == 0 {
                    f(&Self::canonical_basis_element(0));
                }
            },
            2 => {
                if vs.len() == 0 {
                    let _ = f(&Self::canonical_basis_element(0)) &&
                            f(&Self::canonical_basis_element(1));
                }
                else if vs.len() == 1 {
                    let v   = &vs[0];
                    let res = Self::from_column_slice(&[-v[1], v[0]]);

                    f(&res.normalize());
                }

                // Otherwise, nothing.
            },
            3 => {
                if vs.len() == 0 {
                    let _ = f(&Self::canonical_basis_element(0)) &&
                            f(&Self::canonical_basis_element(1)) &&
                            f(&Self::canonical_basis_element(2));
                }
                else if vs.len() == 1 {
                    let v = &vs[0];
                    let mut a;

                    if v[0].abs() > v[1].abs() {
                        a = Self::from_column_slice(&[v[2], N::zero(), -v[0]]);
                    }
                    else {
                        a = Self::from_column_slice(&[N::zero(), -v[2], v[1]]);
                    };

                    let _ = a.normalize_mut();

                    if f(&a.cross(v)) {
                        f(&a);
                    }
                }
                else if vs.len() == 2 {
                    f(&vs[0].cross(&vs[1]).normalize());
                }
            },
            _ => {
                // XXX: use a GenericArray instead.
                let mut known_basis = Vec::new();

                for v in vs.iter() {
                    known_basis.push(v.normalize())
                }

                for i in 0 .. Self::dimension() - vs.len() {
                    let mut elt = Self::canonical_basis_element(i);

                    for v in &known_basis {
                        elt -= v * elt.dot(v)
                    };

                    if let Some(subsp_elt) = elt.try_normalize(N::zero()) {
                        if !f(&subsp_elt) { return };

                        known_basis.push(subsp_elt);
                    }
                }
            }
        }
    }
}

/*
 *
 *
 * Multiplicative structures.
 *
 *
 */
impl<N, D: DimName> Identity<Multiplicative> for MatrixN<N, D>
    where N: Scalar + Zero + One,
          DefaultAllocator: Allocator<N, D, D> {
    #[inline]
    fn identity() -> Self {
        Self::identity()
    }
}

impl<N, D: DimName> AbstractMagma<Multiplicative> for MatrixN<N, D>
    where N: Scalar + Zero + One + ClosedAdd + ClosedMul,
          DefaultAllocator: Allocator<N, D, D> {
    #[inline]
    fn operate(&self, other: &Self) -> Self {
        self * other
    }
}
macro_rules! impl_multiplicative_structure(
    ($($marker: ident<$operator: ident> $(+ $bounds: ident)*),* $(,)*) => {$(
        impl<N, D: DimName> $marker<$operator> for MatrixN<N, D>
            where N: Scalar + Zero + One + ClosedAdd + ClosedMul + $marker<$operator> $(+ $bounds)*,
                  DefaultAllocator: Allocator<N, D, D> { }
    )*}
);

impl_multiplicative_structure!(
    AbstractSemigroup<Multiplicative>,
    AbstractMonoid<Multiplicative> + One
);

/*
 *
 * Ordering
 *
 */
impl<N, R: Dim, C: Dim> MeetSemilattice for MatrixMN<N, R, C>
    where N: Scalar + MeetSemilattice,
          DefaultAllocator: Allocator<N, R, C> {
    #[inline]
    fn meet(&self, other: &Self) -> Self {
        self.zip_map(other, |a, b| a.meet(&b))
    }
}

impl<N, R: Dim, C: Dim> JoinSemilattice for MatrixMN<N, R, C>
    where N: Scalar + JoinSemilattice,
          DefaultAllocator: Allocator<N, R, C> {
    #[inline]
    fn join(&self, other: &Self) -> Self {
        self.zip_map(other, |a, b| a.join(&b))
    }
}

impl<N, R: Dim, C: Dim> Lattice for MatrixMN<N, R, C>
    where N: Scalar + Lattice,
          DefaultAllocator: Allocator<N, R, C> {
    // Computes the component-wise meet and join in a single pass over both matrices.
    #[inline]
    fn meet_join(&self, other: &Self) -> (Self, Self) {
        let shape = self.data.shape();
        assert!(shape == other.data.shape(), "Matrix meet/join error: mismatched dimensions.");

        let mut mres = unsafe { Self::new_uninitialized_generic(shape.0, shape.1) };
        let mut jres = unsafe { Self::new_uninitialized_generic(shape.0, shape.1) };

        for i in 0 .. shape.0.value() * shape.1.value() {
            unsafe {
                let mj = self.data.get_unchecked_linear(i).meet_join(other.data.get_unchecked_linear(i));
                *mres.data.get_unchecked_linear_mut(i) = mj.0;
                *jres.data.get_unchecked_linear_mut(i) = mj.1;
            }
        }

        (mres, jres)
    }
}
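
// NOTE: a minimal usage sketch of the `FiniteDimInnerSpace` implementation above. It is
// only illustrative: the `Vector3` alias, the chosen input vectors and the `1.0e-7`
// tolerance are assumptions for the example, not something mandated by the implementation.
#[cfg(test)]
mod finite_dim_inner_space_sketch {
    use alga::linear::FiniteDimInnerSpace;
    use core::Vector3;

    #[test]
    fn orthonormalize_independent_vectors() {
        let mut vs = [
            Vector3::new(1.0f64, 0.0, 0.0),
            Vector3::new(1.0, 1.0, 0.0),
            Vector3::new(0.0, 0.0, 2.0),
        ];

        // Gram-Schmidt keeps all three linearly independent vectors...
        let nbasis = FiniteDimInnerSpace::orthonormalize(&mut vs[..]);
        assert_eq!(nbasis, 3);

        // ... and the result is orthonormal up to floating-point error.
        assert!(vs[0].dot(&vs[1]).abs() < 1.0e-7);
        assert!((vs[0].norm() - 1.0).abs() < 1.0e-7);
    }
}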