use alga::general::Complex;
use base::allocator::Allocator;
use base::constraint::{AreMultipliable, DimEq, SameNumberOfRows, ShapeConstraint};
use base::{DefaultAllocator, Matrix, Scalar, Unit, Vector};
use dimension::{Dim, DimName, U1};
use storage::{Storage, StorageMut};

use geometry::Point;

/// A reflection wrt. a plane.
pub struct Reflection<N: Scalar, D: Dim, S: Storage<N, D>> {
    axis: Vector<N, D, S>,
    bias: N,
}

impl<N: Complex, D: Dim, S: Storage<N, D>> Reflection<N, D, S> {
    /// Creates a new reflection wrt. the plane orthogonal to the given axis and bias.
    ///
    /// The bias is the position of the plane on the axis. In particular, a bias equal to zero
    /// represents a plane that passes through the origin.
    pub fn new(axis: Unit<Vector<N, D, S>>, bias: N) -> Self {
        Self {
            axis: axis.into_inner(),
            bias,
        }
    }

    /// Creates a new reflection wrt. the plane orthogonal to the given axis and that contains the
    /// point `pt`.
    pub fn new_containing_point(axis: Unit<Vector<N, D, S>>, pt: &Point<N, D>) -> Self
    where
        D: DimName,
        DefaultAllocator: Allocator<N, D>,
    {
        let bias = axis.dotc(&pt.coords);
        Self::new(axis, bias)
    }

    /// The reflection axis.
    pub fn axis(&self) -> &Vector<N, D, S> {
        &self.axis
    }

    // FIXME: naming convention: reflect_to, reflect_assign ?
    /// Applies the reflection to the columns of `rhs`.
    pub fn reflect<R2: Dim, C2: Dim, S2>(&self, rhs: &mut Matrix<N, R2, C2, S2>)
    where
        S2: StorageMut<N, R2, C2>,
        ShapeConstraint: SameNumberOfRows<R2, D>,
    {
        for i in 0..rhs.ncols() {
            // NOTE: we borrow the column twice here. First it is borrowed immutably for the
            // dot product, and then mutably. Somehow, this allows significantly
            // better optimizations of the dot product from the compiler.
            let m_two: N = ::convert(-2.0f64);
            let factor = (self.axis.dotc(&rhs.column(i)) - self.bias) * m_two;
            rhs.column_mut(i).axpy(factor, &self.axis, N::one());
        }
    }

    // FIXME: naming convention: reflect_to, reflect_assign ?
    /// Applies the reflection to the columns of `rhs`.
    pub fn reflect_with_sign<R2: Dim, C2: Dim, S2>(&self, rhs: &mut Matrix<N, R2, C2, S2>, sign: N)
    where
        S2: StorageMut<N, R2, C2>,
        ShapeConstraint: SameNumberOfRows<R2, D>,
    {
        for i in 0..rhs.ncols() {
            // NOTE: we borrow the column twice here. First it is borrowed immutably for the
            // dot product, and then mutably. Somehow, this allows significantly
            // better optimizations of the dot product from the compiler.
            let m_two = sign.scale(::convert(-2.0f64));
            let factor = (self.axis.dotc(&rhs.column(i)) - self.bias) * m_two;
            rhs.column_mut(i).axpy(factor, &self.axis, sign);
        }
    }

    /// Applies the reflection to the rows of `lhs`.
    pub fn reflect_rows<R2: Dim, C2: Dim, S2, S3>(
        &self,
        lhs: &mut Matrix<N, R2, C2, S2>,
        work: &mut Vector<N, R2, S3>,
    ) where
        S2: StorageMut<N, R2, C2>,
        S3: StorageMut<N, R2>,
        ShapeConstraint: DimEq<C2, D> + AreMultipliable<R2, C2, D, U1>,
        DefaultAllocator: Allocator<N, D>,
    {
        lhs.mul_to(&self.axis, work);

        if !self.bias.is_zero() {
            work.add_scalar_mut(-self.bias);
        }

        let m_two: N = ::convert(-2.0f64);
        lhs.gerc(m_two, &work, &self.axis, N::one());
    }

    /// Applies the reflection to the rows of `lhs`.
    pub fn reflect_rows_with_sign<R2: Dim, C2: Dim, S2, S3>(
        &self,
        lhs: &mut Matrix<N, R2, C2, S2>,
        work: &mut Vector<N, R2, S3>,
        sign: N,
    ) where
        S2: StorageMut<N, R2, C2>,
        S3: StorageMut<N, R2>,
        ShapeConstraint: DimEq<C2, D> + AreMultipliable<R2, C2, D, U1>,
        DefaultAllocator: Allocator<N, D>,
    {
        lhs.mul_to(&self.axis, work);

        if !self.bias.is_zero() {
            work.add_scalar_mut(-self.bias);
        }

        let m_two = sign.scale(::convert(-2.0f64));
        lhs.gerc(m_two, &work, &self.axis, sign);
    }
}
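
#[cfg(test)]
mod tests {
    // A minimal usage sketch, not part of the original module. It assumes the
    // crate-root aliases `Vector3` and `Matrix3x2` are reachable through the
    // `base` module, like the other types imported above.
    use super::Reflection;
    use base::{Matrix3x2, Unit, Vector3};

    #[test]
    fn reflect_columns_across_the_xy_plane() {
        // Axis = e_z with a zero bias: the mirror plane is z = 0 and passes
        // through the origin.
        let axis = Unit::new_normalize(Vector3::new(0.0f64, 0.0, 1.0));
        let refl = Reflection::new(axis, 0.0);

        // Two column vectors, (1, 2, 3) and (4, 5, 6), stored as the columns
        // of a 3x2 matrix.
        let mut m = Matrix3x2::new(1.0, 4.0,
                                   2.0, 5.0,
                                   3.0, 6.0);
        refl.reflect(&mut m);

        // Reflecting across z = 0 flips the sign of the z coordinate of each
        // column and leaves the other coordinates untouched.
        assert_eq!(m, Matrix3x2::new( 1.0,  4.0,
                                      2.0,  5.0,
                                     -3.0, -6.0));
    }
}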