forked from M-Labs/nalgebra

WIP: use Complex instead of Real whenever possible in the linalg module.

commit 77f048b6b9 (parent 7c91f2eeb5)
Cargo.toml

@@ -26,7 +26,7 @@ arbitrary = [ "quickcheck" ]
 serde-serialize = [ "serde", "serde_derive", "num-complex/serde" ]
 abomonation-serialize = [ "abomonation" ]
 sparse = [ ]
-debug = [ ]
+debug = [ "approx/num-complex", "rand/std" ]
 alloc = [ ]
 io = [ "pest", "pest_derive" ]
 
@@ -53,3 +53,6 @@ rand_xorshift = "0.1"
 
 [workspace]
 members = [ "nalgebra-lapack", "nalgebra-glm" ]
+
+[patch.crates-io]
+alga = { path = "../alga/alga" }
src/base/blas.rs (118 changed lines)
@@ -13,6 +13,39 @@ use base::dimension::{Dim, Dynamic, U1, U2, U3, U4};
 use base::storage::{Storage, StorageMut};
 use base::{DefaultAllocator, Matrix, Scalar, SquareMatrix, Vector};
 
+// FIXME: find a way to avoid code duplication just for complex number support.
+impl<N: Complex, D: Dim, S: Storage<N, D>> Vector<N, D, S> {
+    /// Computes the index of the vector component with the largest complex or real absolute value.
+    ///
+    /// # Examples:
+    ///
+    /// ```
+    /// # use num_complex::Complex;
+    /// # use nalgebra::Vector3;
+    /// let vec = Vector3::new(Complex::new(11.0, 3.0), Complex::new(-15.0, 0.0), Complex::new(13.0, 5.0));
+    /// assert_eq!(vec.icamax(), 2);
+    /// ```
+    #[inline]
+    pub fn icamax(&self) -> usize {
+        assert!(!self.is_empty(), "The input vector must not be empty.");
+
+        let mut the_max = unsafe { self.vget_unchecked(0).asum() };
+        let mut the_i = 0;
+
+        for i in 1..self.nrows() {
+            let val = unsafe { self.vget_unchecked(i).asum() };
+
+            if val > the_max {
+                the_max = val;
+                the_i = i;
+            }
+        }
+
+        the_i
+    }
+}
+
 impl<N: Scalar + PartialOrd, D: Dim, S: Storage<N, D>> Vector<N, D, S> {
     /// Computes the index and value of the vector component with the largest value.
     ///
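Note: a quick check of the `icamax` doc-test above, assuming `asum()` is the BLAS-style absolute sum `|re| + |im|` (an assumption about the patched `alga::general::Complex` trait, not something shown in this diff): the three components score 11 + 3 = 14, 15 + 0 = 15 and 13 + 5 = 18, so the largest value sits at index 2, which is exactly what the assertion expects. (Using the plain modulus instead would also pick index 2 here.)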
@@ -157,6 +190,41 @@ impl<N: Scalar + PartialOrd, D: Dim, S: Storage<N, D>> Vector<N, D, S> {
     }
 }
 
+// FIXME: find a way to avoid code duplication just for complex number support.
+impl<N: Complex, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
+    /// Computes the index of the matrix component with the largest absolute value.
+    ///
+    /// # Examples:
+    ///
+    /// ```
+    /// # use num_complex::Complex;
+    /// # use nalgebra::Matrix2x3;
+    /// let mat = Matrix2x3::new(Complex::new(11.0, 1.0), Complex::new(-12.0, 2.0), Complex::new(13.0, 3.0),
+    ///                          Complex::new(21.0, 43.0), Complex::new(22.0, 5.0), Complex::new(-23.0, 0.0));
+    /// assert_eq!(mat.icamax_full(), (1, 0));
+    /// ```
+    #[inline]
+    pub fn icamax_full(&self) -> (usize, usize) {
+        assert!(!self.is_empty(), "The input matrix must not be empty.");
+
+        let mut the_max = unsafe { self.get_unchecked((0, 0)).asum() };
+        let mut the_ij = (0, 0);
+
+        for j in 0..self.ncols() {
+            for i in 0..self.nrows() {
+                let val = unsafe { self.get_unchecked((i, j)).asum() };
+
+                if val > the_max {
+                    the_max = val;
+                    the_ij = (i, j);
+                }
+            }
+        }
+
+        the_ij
+    }
+}
+
 impl<N: Scalar + PartialOrd + Signed, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
     /// Computes the index of the matrix component with the largest absolute value.
     ///
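Note: checking the `icamax_full` doc-test with the same `|re| + |im|` reading of `asum()`: the first row scores 12, 14 and 16, the second row 64, 27 and 23, so the maximum (21 + 43 = 64) lives at row 1, column 0, matching the asserted `(1, 0)`. The scan runs column by column, which matches nalgebra's column-major storage.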
@@ -705,6 +773,56 @@ where
     }
 }
 
+// FIXME: duplicate code
+impl<N, R1: Dim, C1: Dim, S: StorageMut<N, R1, C1>> Matrix<N, R1, C1, S>
+where N: Complex + Zero + ClosedAdd + ClosedMul
+{
+    /// Computes `self = alpha * x * y.conjugate().transpose() + beta * self`, i.e. the
+    /// conjugating (complex) counterpart of `ger`.
+    ///
+    /// If `beta` is zero, `self` is never read.
+    ///
+    /// # Examples:
+    ///
+    /// ```
+    /// # use nalgebra::{Matrix2x3, Vector2, Vector3};
+    /// let mut mat = Matrix2x3::repeat(4.0);
+    /// let vec1 = Vector2::new(1.0, 2.0);
+    /// let vec2 = Vector3::new(0.1, 0.2, 0.3);
+    /// let expected = vec1 * vec2.transpose() * 10.0 + mat * 5.0;
+    ///
+    /// mat.gerc(10.0, &vec1, &vec2, 5.0);
+    /// assert_eq!(mat, expected);
+    /// ```
+    #[inline]
+    pub fn gerc<D2: Dim, D3: Dim, SB, SC>(
+        &mut self,
+        alpha: N,
+        x: &Vector<N, D2, SB>,
+        y: &Vector<N, D3, SC>,
+        beta: N,
+    ) where
+        N: One,
+        SB: Storage<N, D2>,
+        SC: Storage<N, D3>,
+        ShapeConstraint: DimEq<R1, D2> + DimEq<C1, D3>,
+    {
+        let (nrows1, ncols1) = self.shape();
+        let dim2 = x.nrows();
+        let dim3 = y.nrows();
+
+        assert!(
+            nrows1 == dim2 && ncols1 == dim3,
+            "ger: dimensions mismatch."
+        );
+
+        for j in 0..ncols1 {
+            // FIXME: avoid bound checks.
+            let val = unsafe { y.vget_unchecked(j).conjugate() };
+            self.column_mut(j).axpy(alpha * val, x, beta);
+        }
+    }
+}
+
 impl<N, R1: Dim, C1: Dim, S: StorageMut<N, R1, C1>> Matrix<N, R1, C1, S>
 where N: Scalar + Zero + ClosedAdd + ClosedMul
 {
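The only difference from the existing `ger` is the `conjugate()` applied to each component of `y`. A minimal standalone sketch of the per-column update, written against plain `num_complex` values (the helper name `gerc_column` is hypothetical and is not part of the diff):

use num_complex::Complex;

// Column j of `self` becomes alpha * conj(y[j]) * x + beta * column_j,
// which is what the axpy call in the loop above performs.
fn gerc_column(
    col: &mut [Complex<f64>],
    alpha: Complex<f64>,
    x: &[Complex<f64>],
    y_j: Complex<f64>,
    beta: Complex<f64>,
) {
    let val = y_j.conj(); // gerc conjugates y; ger would use y_j directly
    for (c, &xi) in col.iter_mut().zip(x) {
        *c = alpha * val * xi + beta * *c;
    }
}

fn main() {
    let mut col = vec![Complex::new(1.0, 0.0); 2];
    let x = [Complex::new(1.0, 1.0), Complex::new(2.0, -1.0)];
    gerc_column(&mut col, Complex::new(1.0, 0.0), &x, Complex::new(0.0, 1.0), Complex::new(0.0, 0.0));
    // y_j = i, so conj(y_j) = -i and col[0] = -i * (1 + i) = 1 - i.
    assert_eq!(col[0], Complex::new(1.0, -1.0));
}

For real scalars the conjugation is a no-op, which is why the doc-test above can reuse the real-valued `ger` expectation unchanged.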
@@ -944,6 +944,47 @@ impl<N: Complex, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
             res
         }
     }
+
+    /// The conjugate of `self`.
+    #[inline]
+    pub fn conjugate(&self) -> MatrixMN<N, R, C>
+        where DefaultAllocator: Allocator<N, R, C> {
+        self.map(|e| e.conjugate())
+    }
+
+    /// Divides each component of `self` by the given real.
+    #[inline]
+    pub fn unscale(&self, real: N::Real) -> MatrixMN<N, R, C>
+        where DefaultAllocator: Allocator<N, R, C> {
+        self.map(|e| e.unscale(real))
+    }
+
+    /// Multiplies each component of `self` by the given real.
+    #[inline]
+    pub fn scale(&self, real: N::Real) -> MatrixMN<N, R, C>
+        where DefaultAllocator: Allocator<N, R, C> {
+        self.map(|e| e.scale(real))
+    }
+}
+
+impl<N: Complex, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
+    /// The conjugate of `self` computed in-place.
+    #[inline]
+    pub fn conjugate_mut(&mut self) {
+        self.apply(|e| e.conjugate())
+    }
+
+    /// Divides each component of `self` by the given real.
+    #[inline]
+    pub fn unscale_mut(&mut self, real: N::Real) {
+        self.apply(|e| e.unscale(real))
+    }
+
+    /// Multiplies each component of `self` by the given real.
+    #[inline]
+    pub fn scale_mut(&mut self, real: N::Real) {
+        self.apply(|e| e.scale(real))
+    }
 }
 
 impl<N: Complex, D: Dim, S: StorageMut<N, D, D>> Matrix<N, D, D, S> {
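The three helpers simply `map`/`apply` a scalar operation over every entry. A tiny sketch of that element-wise meaning, assuming the `conjugate`/`scale`/`unscale` methods of the patched `alga::general::Complex` trait behave like their `num_complex` namesakes (the diff suggests this but does not show it):

use num_complex::Complex;

fn main() {
    let e = Complex::new(3.0_f64, -4.0);
    assert_eq!(e.conj(), Complex::new(3.0, 4.0));        // conjugate()
    assert_eq!(e.unscale(2.0), Complex::new(1.5, -2.0)); // unscale(2.0): divide by a real
    assert_eq!(e.scale(2.0), Complex::new(6.0, -8.0));   // scale(2.0): multiply by a real
}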
@@ -1013,6 +1054,34 @@ impl<N: Scalar, D: Dim, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
     }
 }
 
+impl<N: Complex, D: Dim, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
+    /// The symmetric part of `self`, i.e., `0.5 * (self + self.transpose())`.
+    #[inline]
+    pub fn symmetric_part(&self) -> MatrixMN<N, D, D>
+        where DefaultAllocator: Allocator<N, D, D> {
+        assert!(self.is_square(), "Cannot compute the symmetric part of a non-square matrix.");
+        let mut tr = self.transpose();
+        tr += self;
+        tr *= ::convert::<_, N>(0.5);
+        tr
+    }
+
+    /// The hermitian part of `self`, i.e., `0.5 * (self + self.conjugate_transpose())`.
+    #[inline]
+    pub fn hermitian_part(&self) -> MatrixMN<N, D, D>
+        where DefaultAllocator: Allocator<N, D, D> {
+        assert!(self.is_square(), "Cannot compute the hermitian part of a non-square matrix.");
+        let nrows = self.data.shape().0;
+
+        unsafe {
+            let mut tr = self.conjugate_transpose();
+            tr += self;
+            tr *= ::convert::<_, N>(0.5);
+            tr
+        }
+    }
+}
+
 impl<N: Scalar + One + Zero, D: DimAdd<U1> + IsNotStaticOne, S: Storage<N, D, D>> Matrix<N, D, D, S> {
 
     /// Yields the homogeneous matrix for this matrix, i.e., appending an additional dimension and
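Note: since conjugate-transposition is an involution, (0.5 * (M + Mᴴ))ᴴ = 0.5 * (Mᴴ + M), so `hermitian_part` always returns a self-adjoint matrix, and for a real matrix it coincides with `symmetric_part`. (The unused `nrows` binding and the `unsafe` block around plain arithmetic in `hermitian_part` are harmless; they do not affect the result.)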
@@ -5,7 +5,7 @@ use std::ops::{
     Add, AddAssign, Div, DivAssign, Index, IndexMut, Mul, MulAssign, Neg, Sub, SubAssign,
 };
 
-use alga::general::{ClosedAdd, ClosedDiv, ClosedMul, ClosedNeg, ClosedSub};
+use alga::general::{Complex, ClosedAdd, ClosedDiv, ClosedMul, ClosedNeg, ClosedSub};
 
 use base::allocator::{Allocator, SameShapeAllocator, SameShapeC, SameShapeR};
 use base::constraint::{
@@ -760,6 +760,25 @@ where
     }
 }
 
+// XXX: avoid code duplication.
+impl<N: Complex, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
+    /// Returns the absolute value of the component with the largest absolute value.
+    #[inline]
+    pub fn camax(&self) -> N::Real {
+        let mut max = N::Real::zero();
+
+        for e in self.iter() {
+            let ae = e.asum();
+
+            if ae > max {
+                max = ae;
+            }
+        }
+
+        max
+    }
+}
+
 impl<N: Scalar + PartialOrd + Signed, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
     /// Returns the absolute value of the component with the largest absolute value.
     #[inline]
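Note: unlike `icamax` and `icamax_full`, `camax` returns the largest `asum()` value itself (a real number) rather than an index; it folds over every entry starting from `N::Real::zero()` and does not assert non-emptiness, so an empty matrix yields zero instead of panicking.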
@@ -2,7 +2,7 @@
 use approx::RelativeEq;
 use num::{One, Zero};
 
-use alga::general::{ClosedAdd, ClosedMul, Real};
+use alga::general::{ClosedAdd, ClosedMul, Real, Complex};
 
 use base::allocator::Allocator;
 use base::dimension::{Dim, DimMin};
@@ -82,7 +82,9 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
 
         true
     }
+}
 
+impl<N: Complex, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
     /// Checks that `Mᵀ × M = Id`.
     ///
     /// In this definition `Id` is approximately equal to the identity matrix with a relative error
@@ -93,9 +95,10 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
         N: Zero + One + ClosedAdd + ClosedMul + RelativeEq,
         S: Storage<N, R, C>,
         N::Epsilon: Copy,
-        DefaultAllocator: Allocator<N, C, C>,
+        DefaultAllocator: Allocator<N, R, C> + Allocator<N, C, C>,
     {
-        (self.tr_mul(self)).is_identity(eps)
+        // FIXME: add a conjugate-transpose-mul
+        (self.conjugate().tr_mul(self)).is_identity(eps)
     }
 }
 
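Note: with the change above, the test computes conjugate(M)ᵀ × M = Mᴴ × M, so it now checks unitarity for complex matrices and reduces to the old orthogonality check for real ones; the extra `Allocator<N, R, C>` bound is needed because `conjugate()` allocates the conjugated copy.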
@@ -1,4 +1,4 @@
-use alga::general::Real;
+use alga::general::Complex;
 
 use base::allocator::Allocator;
 use base::constraint::{AreMultipliable, DimEq, SameNumberOfRows, ShapeConstraint};
 use base::{DefaultAllocator, Matrix, Scalar, Unit, Vector};
@@ -13,7 +13,7 @@ pub struct Reflection<N: Scalar, D: Dim, S: Storage<N, D>> {
     bias: N,
 }
 
-impl<N: Real, D: Dim, S: Storage<N, D>> Reflection<N, D, S> {
+impl<N: Complex, D: Dim, S: Storage<N, D>> Reflection<N, D, S> {
     /// Creates a new reflection wrt the plane orthogonal to the given axis and bias.
     ///
     /// The bias is the position of the plane on the axis. In particular, a bias equal to zero
@@ -21,7 +21,7 @@ impl<N: Real, D: Dim, S: Storage<N, D>> Reflection<N, D, S> {
     pub fn new(axis: Unit<Vector<N, D, S>>, bias: N) -> Self {
         Self {
             axis: axis.into_inner(),
-            bias: bias,
+            bias,
         }
     }
 
@@ -35,7 +35,7 @@ impl<N: Real, D: Dim, S: Storage<N, D>> Reflection<N, D, S> {
         D: DimName,
         DefaultAllocator: Allocator<N, D>,
     {
-        let bias = pt.coords.dot(axis.as_ref());
+        let bias = axis.cdot(&pt.coords);
         Self::new(axis, bias)
     }
 
@@ -56,7 +56,7 @@ impl<N: Real, D: Dim, S: Storage<N, D>> Reflection<N, D, S> {
         // dot product, and then mutably. Somehow, this allows significantly
         // better optimizations of the dot product from the compiler.
         let m_two: N = ::convert(-2.0f64);
-        let factor = (rhs.column(i).dot(&self.axis) - self.bias) * m_two;
+        let factor = (self.axis.cdot(&rhs.column(i)) - self.bias) * m_two;
         rhs.column_mut(i).axpy(factor, &self.axis, N::one());
     }
 }
@@ -70,8 +70,9 @@ impl<N: Real, D: Dim, S: Storage<N, D>> Reflection<N, D, S> {
         S2: StorageMut<N, R2, C2>,
         S3: StorageMut<N, R2>,
         ShapeConstraint: DimEq<C2, D> + AreMultipliable<R2, C2, D, U1>,
+        DefaultAllocator: Allocator<N, D>
     {
-        rhs.mul_to(&self.axis, work);
+        rhs.mul_to(&self.axis.conjugate(), work);
 
         if !self.bias.is_zero() {
             work.add_scalar_mut(-self.bias);
@@ -1,7 +1,7 @@
 #[cfg(feature = "serde-serialize")]
 use serde::{Deserialize, Serialize};
 
-use alga::general::Real;
+use alga::general::Complex;
 use allocator::Allocator;
 use base::{DefaultAllocator, Matrix, MatrixMN, MatrixN, Unit, VectorN};
 use constraint::{DimEq, ShapeConstraint};
@@ -38,7 +38,7 @@ use linalg::householder;
 ))
 )]
 #[derive(Clone, Debug)]
-pub struct Bidiagonal<N: Real, R: DimMin<C>, C: Dim>
+pub struct Bidiagonal<N: Complex, R: DimMin<C>, C: Dim>
 where
     DimMinimum<R, C>: DimSub<U1>,
     DefaultAllocator: Allocator<N, R, C>
@@ -55,7 +55,7 @@ where
     upper_diagonal: bool,
 }
 
-impl<N: Real, R: DimMin<C>, C: Dim> Copy for Bidiagonal<N, R, C>
+impl<N: Complex, R: DimMin<C>, C: Dim> Copy for Bidiagonal<N, R, C>
 where
     DimMinimum<R, C>: DimSub<U1>,
     DefaultAllocator: Allocator<N, R, C>
@@ -66,7 +66,7 @@ where
     VectorN<N, DimDiff<DimMinimum<R, C>, U1>>: Copy,
 {}
 
-impl<N: Real, R: DimMin<C>, C: Dim> Bidiagonal<N, R, C>
+impl<N: Complex, R: DimMin<C>, C: Dim> Bidiagonal<N, R, C>
 where
     DimMinimum<R, C>: DimSub<U1>,
     DefaultAllocator: Allocator<N, R, C>
@@ -273,7 +273,7 @@ where
     }
 }
 
-// impl<N: Real, D: DimMin<D, Output = D> + DimSub<Dynamic>> Bidiagonal<N, D, D>
+// impl<N: Complex, D: DimMin<D, Output = D> + DimSub<Dynamic>> Bidiagonal<N, D, D>
 // where DefaultAllocator: Allocator<N, D, D> +
 //                         Allocator<N, D> {
 // /// Solves the linear system `self * x = b`, where `x` is the unknown to be determined.
@@ -346,7 +346,7 @@ where
 // // }
 // }
 
-impl<N: Real, R: DimMin<C>, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S>
+impl<N: Complex, R: DimMin<C>, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S>
 where
     DimMinimum<R, C>: DimSub<U1>,
     DefaultAllocator: Allocator<N, R, C>
@@ -1,7 +1,8 @@
 #[cfg(feature = "serde-serialize")]
 use serde::{Deserialize, Serialize};
 
-use alga::general::Real;
+use num::Zero;
+use alga::general::Complex;
 
 use allocator::Allocator;
 use base::{DefaultAllocator, Matrix, MatrixMN, MatrixN, SquareMatrix};
@@ -26,19 +27,19 @@ use storage::{Storage, StorageMut};
 ))
 )]
 #[derive(Clone, Debug)]
-pub struct Cholesky<N: Real, D: Dim>
+pub struct Cholesky<N: Complex, D: Dim>
 where DefaultAllocator: Allocator<N, D, D>
 {
     chol: MatrixN<N, D>,
 }
 
-impl<N: Real, D: Dim> Copy for Cholesky<N, D>
+impl<N: Complex, D: Dim> Copy for Cholesky<N, D>
 where
     DefaultAllocator: Allocator<N, D, D>,
     MatrixN<N, D>: Copy,
 {}
 
-impl<N: Real, D: DimSub<Dynamic>> Cholesky<N, D>
+impl<N: Complex, D: DimSub<Dynamic>> Cholesky<N, D>
 where DefaultAllocator: Allocator<N, D, D>
 {
     /// Attempts to compute the Cholesky decomposition of `matrix`.
@@ -62,7 +63,7 @@ where DefaultAllocator: Allocator<N, D, D>
             }
 
             let diag = unsafe { *matrix.get_unchecked((j, j)) };
-            if diag > N::zero() {
+            if diag.real() > N::Real::zero() {
                 let denom = diag.sqrt();
                 unsafe {
                     *matrix.get_unchecked_mut((j, j)) = denom;
@@ -144,7 +145,7 @@ where DefaultAllocator: Allocator<N, D, D>
     }
 }
 
-impl<N: Real, D: DimSub<Dynamic>, S: Storage<N, D, D>> SquareMatrix<N, D, S>
+impl<N: Complex, D: DimSub<Dynamic>, S: Storage<N, D, D>> SquareMatrix<N, D, S>
 where DefaultAllocator: Allocator<N, D, D>
 {
     /// Attempts to compute the Cholesky decomposition of this matrix.
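Note: the switch from `diag > N::zero()` to `diag.real() > N::Real::zero()` is forced by the trait change, since complex numbers are not ordered and the old comparison would no longer type-check. It is also the right test mathematically: for a Hermitian positive-definite input every diagonal entry equals its own conjugate, hence is real and strictly positive, so only its real part needs to be inspected.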
@@ -1,4 +1,4 @@
-use alga::general::Real;
+use alga::general::Complex;
 
 use base::allocator::Allocator;
 use base::dimension::DimMin;
@@ -7,7 +7,7 @@ use base::{DefaultAllocator, SquareMatrix};
 
 use linalg::LU;
 
-impl<N: Real, D: DimMin<D, Output = D>, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
+impl<N: Complex, D: DimMin<D, Output = D>, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
     /// Computes the matrix determinant.
     ///
     /// If the matrix has a dimension larger than 3, an LU decomposition is used.
@@ -1,7 +1,7 @@
 #[cfg(feature = "serde-serialize")]
 use serde::{Serialize, Deserialize};
 
-use alga::general::Real;
+use alga::general::Complex;
 use num_complex::Complex;
 use std::cmp;
 use std::fmt::Display;
@@ -17,7 +17,7 @@ use geometry::{Reflection, UnitComplex};
 use linalg::householder;
 use linalg::RealSchur;
 
-/// Eigendecomposition of a matrix with real eigenvalues.
+/// Eigendecomposition of a real matrix with real eigenvalues (or complex eigenvalues for complex matrices).
 #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
 #[cfg_attr(
     feature = "serde-serialize",
@@ -40,7 +40,7 @@ use linalg::RealSchur;
 )
 )]
 #[derive(Clone, Debug)]
-pub struct RealEigen<N: Real, D: Dim>
+pub struct Eigen<N: Complex, D: Dim>
 where
     DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>,
 {
@@ -48,7 +48,7 @@ where
     pub eigenvalues: VectorN<N, D>,
 }
 
-impl<N: Real, D: Dim> Copy for RealEigen<N, D>
+impl<N: Complex, D: Dim> Copy for Eigen<N, D>
 where
     DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>,
     MatrixN<N, D>: Copy,
@@ -56,7 +56,7 @@ where
 {
 }
 
-impl<N: Real, D: Dim> RealEigen<N, D>
+impl<N: Complex, D: Dim> Eigen<N, D>
 where
     D: DimSub<U1>, // For Hessenberg.
     ShapeConstraint: DimEq<Dynamic, DimDiff<D, U1>>, // For Hessenberg.
@@ -68,8 +68,8 @@ where
     DefaultAllocator: Allocator<usize, D, D>,
     MatrixN<N, D>: Display,
 {
-    /// Computes the eigendecomposition of a diagonalizable matrix with real eigenvalues.
-    pub fn new(m: MatrixN<N, D>) -> Option<RealEigen<N, D>> {
+    /// Computes the eigendecomposition of a diagonalizable matrix with complex eigenvalues.
+    pub fn new(m: MatrixN<N, D>) -> Option<Eigen<N, D>> {
         assert!(
             m.is_square(),
             "Unable to compute the eigendecomposition of a non-square matrix."
@@ -80,7 +80,7 @@ where
 
         println!("Schur eigenvalues: {}", eigenvalues);
 
-        // Check that the eigenvalues are all real.
+        // Check that the eigenvalues are all Complex.
        for i in 0..dim - 1 {
             if !eigenvalues[(i + 1, i)].is_zero() {
                 return None;
@@ -112,8 +112,8 @@ where
             let _ = eigenvectors.column_mut(i).normalize_mut();
         }
 
-        Some(RealEigen {
-            eigenvectors: eigenvectors,
+        Some(Eigen {
+            eigenvectors,
             eigenvalues: eigenvalues.diagonal(),
         })
     }
@@ -1,7 +1,7 @@
 #[cfg(feature = "serde-serialize")]
 use serde::{Deserialize, Serialize};
 
-use alga::general::Real;
+use alga::general::Complex;
 use allocator::Allocator;
 use base::{DefaultAllocator, Matrix, MatrixMN, MatrixN};
 use constraint::{SameNumberOfRows, ShapeConstraint};
@@ -32,7 +32,7 @@ use linalg::PermutationSequence;
 ))
 )]
 #[derive(Clone, Debug)]
-pub struct FullPivLU<N: Real, R: DimMin<C>, C: Dim>
+pub struct FullPivLU<N: Complex, R: DimMin<C>, C: Dim>
 where DefaultAllocator: Allocator<N, R, C> + Allocator<(usize, usize), DimMinimum<R, C>>
 {
     lu: MatrixMN<N, R, C>,
@@ -40,14 +40,14 @@ where DefaultAllocator: Allocator<N, R, C> + Allocator<(usize, usize), DimMinimu
     q: PermutationSequence<DimMinimum<R, C>>,
 }
 
-impl<N: Real, R: DimMin<C>, C: Dim> Copy for FullPivLU<N, R, C>
+impl<N: Complex, R: DimMin<C>, C: Dim> Copy for FullPivLU<N, R, C>
 where
     DefaultAllocator: Allocator<N, R, C> + Allocator<(usize, usize), DimMinimum<R, C>>,
     MatrixMN<N, R, C>: Copy,
     PermutationSequence<DimMinimum<R, C>>: Copy,
 {}
 
-impl<N: Real, R: DimMin<C>, C: Dim> FullPivLU<N, R, C>
+impl<N: Complex, R: DimMin<C>, C: Dim> FullPivLU<N, R, C>
 where DefaultAllocator: Allocator<N, R, C> + Allocator<(usize, usize), DimMinimum<R, C>>
 {
     /// Computes the LU decomposition with full pivoting of `matrix`.
@@ -69,7 +69,7 @@ where DefaultAllocator: Allocator<N, R, C> + Allocator<(usize, usize), DimMinimu
         }
 
         for i in 0..min_nrows_ncols.value() {
-            let piv = matrix.slice_range(i.., i..).iamax_full();
+            let piv = matrix.slice_range(i.., i..).icamax_full();
             let row_piv = piv.0 + i;
             let col_piv = piv.1 + i;
             let diag = matrix[(row_piv, col_piv)];
@@ -156,7 +156,7 @@ where DefaultAllocator: Allocator<N, R, C> + Allocator<(usize, usize), DimMinimu
     }
 }
 
-impl<N: Real, D: DimMin<D, Output = D>> FullPivLU<N, D, D>
+impl<N: Complex, D: DimMin<D, Output = D>> FullPivLU<N, D, D>
 where DefaultAllocator: Allocator<N, D, D> + Allocator<(usize, usize), D>
 {
     /// Solves the linear system `self * x = b`, where `x` is the unknown to be determined.
@@ -261,7 +261,7 @@ where DefaultAllocator: Allocator<N, D, D> + Allocator<(usize, usize), D>
     }
 }
 
-impl<N: Real, R: DimMin<C>, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S>
+impl<N: Complex, R: DimMin<C>, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S>
 where DefaultAllocator: Allocator<N, R, C> + Allocator<(usize, usize), DimMinimum<R, C>>
 {
     /// Computes the LU decomposition with full pivoting of `matrix`.
@@ -1,36 +1,175 @@
 //! Construction of givens rotations.
 
-use alga::general::Real;
-use num_complex::Complex;
+use alga::general::{Complex, Real};
+use num_complex::Complex as NumComplex;
 
-use base::dimension::U2;
-use base::storage::Storage;
-use base::Vector;
+use base::dimension::{Dim, U2};
+use base::constraint::{ShapeConstraint, DimEq};
+use base::storage::{Storage, StorageMut};
+use base::{Vector, Matrix};
 
 use geometry::UnitComplex;
 
+/// A Givens rotation.
+pub struct GivensRotation<N: Complex> {
+    c: N,
+    s: N
+}
+
+// XXX: remove this
 /// Computes the rotation `R` required such that the `y` component of `R * v` is zero.
 ///
 /// Returns `None` if no rotation is needed (i.e. if `v.y == 0`). Otherwise, this returns the norm
 /// of `v` and the rotation `r` such that `R * v = [ |v|, 0.0 ]^t` where `|v|` is the norm of `v`.
 pub fn cancel_y<N: Real, S: Storage<N, U2>>(v: &Vector<N, U2, S>) -> Option<(UnitComplex<N>, N)> {
     if !v[1].is_zero() {
-        let c = Complex::new(v[0], -v[1]);
+        let c = NumComplex::new(v[0], -v[1]);
         Some(UnitComplex::from_complex_and_get(c))
     } else {
         None
     }
 }
 
+// XXX: remove this
 /// Computes the rotation `R` required such that the `x` component of `R * v` is zero.
 ///
 /// Returns `None` if no rotation is needed (i.e. if `v.x == 0`). Otherwise, this returns the norm
 /// of `v` and the rotation `r` such that `R * v = [ 0.0, |v| ]^t` where `|v|` is the norm of `v`.
 pub fn cancel_x<N: Real, S: Storage<N, U2>>(v: &Vector<N, U2, S>) -> Option<(UnitComplex<N>, N)> {
     if !v[0].is_zero() {
-        let c = Complex::new(v[1], v[0]);
+        let c = NumComplex::new(v[1], v[0]);
         Some(UnitComplex::from_complex_and_get(c))
     } else {
         None
     }
 }
 
+// Matrix = UnitComplex * Matrix
+impl<N: Complex> GivensRotation<N> {
+    /// Initializes a Givens rotation from its non-normalized cosine and sine components.
+    pub fn new(c: N, s: N) -> Self {
+        let denom = (c.modulus_squared() + s.modulus_squared()).sqrt();
+        Self {
+            c: c.unscale(denom),
+            s: s.unscale(denom)
+        }
+    }
+
+    /// Initializes a Givens rotation from its non-normalized cosine and sine components,
+    /// returning `None` if their norm does not exceed `eps`.
+    pub fn try_new(c: N, s: N, eps: N::Real) -> Option<Self> {
+        let denom = (c.modulus_squared() + s.modulus_squared()).sqrt();
+
+        if denom > eps {
+            Some(Self {
+                c: c.unscale(denom),
+                s: s.unscale(denom)
+            })
+        } else {
+            None
+        }
+    }
+
+    /// Computes the rotation `R` required such that the `y` component of `R * v` is zero.
+    ///
+    /// Returns `None` if no rotation is needed (i.e. if `v.y == 0`). Otherwise, this returns the norm
+    /// of `v` and the rotation `r` such that `R * v = [ |v|, 0.0 ]^t` where `|v|` is the norm of `v`.
+    pub fn cancel_y<S: Storage<N, U2>>(v: &Vector<N, U2, S>) -> Option<(Self, N)> {
+        if !v[1].is_zero() {
+            let (mod0, sign0) = v[0].to_exp();
+            let denom = (mod0 * mod0 + v[1].modulus_squared()).sqrt();
+            let c = N::from_real(mod0 / denom);
+            let s = (sign0 * v[1].conjugate()).unscale(-denom);
+            let r = sign0.scale(denom);
+            Some((Self { c, s }, r))
+        } else {
+            None
+        }
+    }
+
+    /// Computes the rotation `R` required such that the `x` component of `R * v` is zero.
+    ///
+    /// Returns `None` if no rotation is needed (i.e. if `v.x == 0`). Otherwise, this returns the norm
+    /// of `v` and the rotation `r` such that `R * v = [ 0.0, |v| ]^t` where `|v|` is the norm of `v`.
+    pub fn cancel_x<S: Storage<N, U2>>(v: &Vector<N, U2, S>) -> Option<(Self, N)> {
+        if !v[0].is_zero() {
+            let (mod0, sign0) = v[0].to_exp();
+            let denom = (mod0 * mod0 + v[1].modulus_squared()).sqrt();
+            let c = N::from_real(mod0 / denom);
+            let s = (sign0 * v[1].conjugate()).unscale(denom);
+            let r = sign0.scale(denom);
+            Some((Self { c, s }, r))
+        } else {
+            None
+        }
+    }
+
+    /// The cosine part of this rotation.
+    pub fn c(&self) -> N {
+        self.c
+    }
+
+    /// The sine part of this rotation.
+    pub fn s(&self) -> N {
+        self.s
+    }
+
+    /// The inverse of this Givens rotation.
+    pub fn inverse(&self) -> Self {
+        Self { c: self.c, s: -self.s.conjugate() }
+    }
+
+    /// Performs the multiplication `rhs = self * rhs` in-place.
+    pub fn rotate<R2: Dim, C2: Dim, S2: StorageMut<N, R2, C2>>(
+        &self,
+        rhs: &mut Matrix<N, R2, C2, S2>,
+    ) where
+        ShapeConstraint: DimEq<R2, U2>,
+    {
+        assert_eq!(
+            rhs.nrows(),
+            2,
+            "Unit complex rotation: the input matrix must have exactly two rows."
+        );
+        let s = self.s;
+        let c = self.c;
+
+        for j in 0..rhs.ncols() {
+            unsafe {
+                let a = *rhs.get_unchecked((0, j));
+                let b = *rhs.get_unchecked((1, j));
+
+                *rhs.get_unchecked_mut((0, j)) = c * a - s.conjugate() * b;
+                *rhs.get_unchecked_mut((1, j)) = s * a + c.conjugate() * b;
+            }
+        }
+    }
+
+    /// Performs the multiplication `lhs = lhs * self` in-place.
+    pub fn rotate_rows<R2: Dim, C2: Dim, S2: StorageMut<N, R2, C2>>(
+        &self,
+        lhs: &mut Matrix<N, R2, C2, S2>,
+    ) where
+        ShapeConstraint: DimEq<C2, U2>,
+    {
+        assert_eq!(
+            lhs.ncols(),
+            2,
+            "Unit complex rotation: the input matrix must have exactly two columns."
+        );
+        let s = self.s;
+        let c = self.c;
+
+        // FIXME: can we optimize that to iterate on one column at a time ?
+        for j in 0..lhs.nrows() {
+            unsafe {
+                let a = *lhs.get_unchecked((j, 0));
+                let b = *lhs.get_unchecked((j, 1));
+
+                *lhs.get_unchecked_mut((j, 0)) = c * a + s * b;
+                *lhs.get_unchecked_mut((j, 1)) = -s.conjugate() * a + c.conjugate() * b;
+            }
+        }
+    }
+}
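To see the convention concretely, here is a standalone sketch of the 2x2 update that `rotate` applies to one column, written against plain `num_complex` values. The helper `rotate_column` is hypothetical, and the `c`/`s` values below are worked out by hand from `cancel_y` for a real input; neither is part of the diff.

use num_complex::Complex;

type C64 = Complex<f64>;

// Same formulas as in GivensRotation::rotate above.
fn rotate_column(c: C64, s: C64, a: C64, b: C64) -> (C64, C64) {
    (c * a - s.conj() * b, s * a + c.conj() * b)
}

fn main() {
    // For v = (3, 4) with a real leading component, cancel_y yields
    // c = 3/5, s = -4/5 and r = 5 = |v|.
    let (a, b) = (C64::new(3.0, 0.0), C64::new(4.0, 0.0));
    let (c, s) = (C64::new(0.6, 0.0), C64::new(-0.8, 0.0));

    let (top, bottom) = rotate_column(c, s, a, b);
    assert!((top - C64::new(5.0, 0.0)).norm() < 1e-12);
    assert!(bottom.norm() < 1e-12); // the y component has been cancelled
}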
@@ -1,7 +1,7 @@
 #[cfg(feature = "serde-serialize")]
 use serde::{Deserialize, Serialize};
 
-use alga::general::Real;
+use alga::general::Complex;
 use allocator::Allocator;
 use base::{DefaultAllocator, MatrixMN, MatrixN, SquareMatrix, VectorN};
 use constraint::{DimEq, ShapeConstraint};
@@ -31,21 +31,21 @@ use linalg::householder;
 ))
 )]
 #[derive(Clone, Debug)]
-pub struct Hessenberg<N: Real, D: DimSub<U1>>
+pub struct Hessenberg<N: Complex, D: DimSub<U1>>
 where DefaultAllocator: Allocator<N, D, D> + Allocator<N, DimDiff<D, U1>>
 {
     hess: MatrixN<N, D>,
     subdiag: VectorN<N, DimDiff<D, U1>>,
 }
 
-impl<N: Real, D: DimSub<U1>> Copy for Hessenberg<N, D>
+impl<N: Complex, D: DimSub<U1>> Copy for Hessenberg<N, D>
 where
     DefaultAllocator: Allocator<N, D, D> + Allocator<N, DimDiff<D, U1>>,
     MatrixN<N, D>: Copy,
     VectorN<N, DimDiff<D, U1>>: Copy,
 {}
 
-impl<N: Real, D: DimSub<U1>> Hessenberg<N, D>
+impl<N: Complex, D: DimSub<U1>> Hessenberg<N, D>
 where DefaultAllocator: Allocator<N, D, D> + Allocator<N, D> + Allocator<N, DimDiff<D, U1>>
 {
     /// Computes the Hessenberg decomposition using householder reflections.
@@ -137,7 +137,7 @@ where DefaultAllocator: Allocator<N, D, D> + Allocator<N, D> + Allocator<N, DimD
     }
 }
 
-impl<N: Real, D: DimSub<U1>, S: Storage<N, D, D>> SquareMatrix<N, D, S>
+impl<N: Complex, D: DimSub<U1>, S: Storage<N, D, D>> SquareMatrix<N, D, S>
 where DefaultAllocator: Allocator<N, D, D> + Allocator<N, D> + Allocator<N, DimDiff<D, U1>>
 {
     /// Computes the Hessenberg decomposition of this matrix using householder reflections.
@@ -1,6 +1,7 @@
 //! Construction of householder elementary reflections.
 
-use alga::general::Real;
+use num::Zero;
+use alga::general::Complex;
 use allocator::Allocator;
 use base::{DefaultAllocator, MatrixMN, MatrixN, Unit, Vector, VectorN};
 use dimension::Dim;
@@ -15,35 +16,34 @@ use geometry::Reflection;
 /// `column` after reflection and `false` if no reflection was necessary.
 #[doc(hidden)]
 #[inline(always)]
-pub fn reflection_axis_mut<N: Real, D: Dim, S: StorageMut<N, D>>(
+pub fn reflection_axis_mut<N: Complex, D: Dim, S: StorageMut<N, D>>(
     column: &mut Vector<N, D, S>,
 ) -> (N, bool) {
     let reflection_sq_norm = column.norm_squared();
-    let mut reflection_norm = reflection_sq_norm.sqrt();
+    let reflection_norm = reflection_sq_norm.sqrt();
 
     let factor;
-    unsafe {
-        if *column.vget_unchecked(0) > N::zero() {
-            reflection_norm = -reflection_norm;
-        }
+    let scaled_norm;
 
-        factor =
-            (reflection_sq_norm - *column.vget_unchecked(0) * reflection_norm) * ::convert(2.0);
-        *column.vget_unchecked_mut(0) -= reflection_norm;
-    }
+    unsafe {
+        let (modulus, exp) = column.vget_unchecked(0).to_exp();
+        scaled_norm = exp.scale(reflection_norm);
+        factor = (reflection_sq_norm + modulus * reflection_norm) * ::convert(2.0);
+        *column.vget_unchecked_mut(0) += scaled_norm;
+    };
 
     if !factor.is_zero() {
-        *column /= factor.sqrt();
-        (reflection_norm, true)
+        column.unscale_mut(factor.sqrt());
+        (-scaled_norm, true)
     } else {
-        (reflection_norm, false)
+        (-scaled_norm, false)
    }
 }
 
 /// Uses a Householder reflection to zero out the `icol`-th column, starting with the `shift + 1`-th
 /// subdiagonal element.
 #[doc(hidden)]
-pub fn clear_column_unchecked<N: Real, R: Dim, C: Dim>(
+pub fn clear_column_unchecked<N: Complex, R: Dim, C: Dim>(
     matrix: &mut MatrixMN<N, R, C>,
     diag_elt: &mut N,
     icol: usize,
@@ -70,7 +70,7 @@ pub fn clear_column_unchecked<N: Real, R: Dim, C: Dim>(
 /// Uses a Householder reflection to zero out the `irow`-th row, ending before the `shift + 1`-th
 /// superdiagonal element.
 #[doc(hidden)]
-pub fn clear_row_unchecked<N: Real, R: Dim, C: Dim>(
+pub fn clear_row_unchecked<N: Complex, R: Dim, C: Dim>(
     matrix: &mut MatrixMN<N, R, C>,
     diag_elt: &mut N,
     axis_packed: &mut VectorN<N, C>,
@@ -94,7 +94,7 @@ pub fn clear_row_unchecked<N: Real, R: Dim, C: Dim>(
             &mut work.rows_range_mut(irow + 1..),
         );
         top.columns_range_mut(irow + shift..)
-            .tr_copy_from(refl.axis());
+            .tr_copy_from(&refl.axis());
     } else {
         top.columns_range_mut(irow + shift..).tr_copy_from(&axis);
     }
@@ -104,7 +104,7 @@ pub fn clear_row_unchecked<N: Real, R: Dim, C: Dim>(
 /// the lower-diagonal element of the given matrix.
 /// matrices.
 #[doc(hidden)]
-pub fn assemble_q<N: Real, D: Dim>(m: &MatrixN<N, D>) -> MatrixN<N, D>
+pub fn assemble_q<N: Complex, D: Dim>(m: &MatrixN<N, D>) -> MatrixN<N, D>
 where DefaultAllocator: Allocator<N, D, D> {
     assert!(m.is_square());
     let dim = m.data.shape().0;
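Note: the reworked `reflection_axis_mut` generalizes the usual sign trick to complex entries. Writing the leading entry in polar form as `modulus * exp` (via `to_exp()`), the update adds `exp.scale(reflection_norm)`, a term with the same phase as the leading entry, so the two can never cancel; this is the complex analogue of the old `reflection_norm = -reflection_norm` flip. Moreover, `factor = 2 * (||v||^2 + |v0| * ||v||)` is exactly the squared norm of the updated column (expand ||v + exp*||v||*e1||^2 and use conj(v0) * exp = |v0|), so `unscale_mut(factor.sqrt())` normalizes the reflection axis, and the returned `-scaled_norm` is the value the pivot takes after the reflection, with modulus ||v||.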
@@ -1,4 +1,4 @@
-use alga::general::Real;
+use alga::general::Complex;
 
 use base::allocator::Allocator;
 use base::dimension::Dim;
@@ -7,7 +7,7 @@ use base::{DefaultAllocator, MatrixN, SquareMatrix};
 
 use linalg::lu;
 
-impl<N: Real, D: Dim, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
+impl<N: Complex, D: Dim, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
     /// Attempts to invert this matrix.
     #[inline]
     pub fn try_inverse(self) -> Option<MatrixN<N, D>>
@@ -21,7 +21,7 @@ impl<N: Real, D: Dim, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
     }
 }
 
-impl<N: Real, D: Dim, S: StorageMut<N, D, D>> SquareMatrix<N, D, S> {
+impl<N: Complex, D: Dim, S: StorageMut<N, D, D>> SquareMatrix<N, D, S> {
     /// Attempts to invert this matrix in-place. Returns `false` and leaves `self` untouched if
     /// inversion fails.
     #[inline]
@@ -115,7 +115,7 @@ impl<N: Real, D: Dim, S: StorageMut<N, D, D>> SquareMatrix<N, D, S> {
 }
 
 // NOTE: this is an extremely efficient, loop-unrolled matrix inverse from MESA (MIT licensed).
-fn do_inverse4<N: Real, D: Dim, S: StorageMut<N, D, D>>(
+fn do_inverse4<N: Complex, D: Dim, S: StorageMut<N, D, D>>(
     m: &MatrixN<N, D>,
     out: &mut SquareMatrix<N, D, S>,
 ) -> bool
@@ -1,7 +1,7 @@
 #[cfg(feature = "serde-serialize")]
 use serde::{Deserialize, Serialize};
 
-use alga::general::{Field, Real};
+use alga::general::{Field, Complex};
 use allocator::{Allocator, Reallocator};
 use base::{DefaultAllocator, Matrix, MatrixMN, MatrixN, Scalar};
 use constraint::{SameNumberOfRows, ShapeConstraint};
@@ -32,14 +32,14 @@ use linalg::PermutationSequence;
 ))
 )]
 #[derive(Clone, Debug)]
-pub struct LU<N: Real, R: DimMin<C>, C: Dim>
+pub struct LU<N: Complex, R: DimMin<C>, C: Dim>
 where DefaultAllocator: Allocator<N, R, C> + Allocator<(usize, usize), DimMinimum<R, C>>
 {
     lu: MatrixMN<N, R, C>,
     p: PermutationSequence<DimMinimum<R, C>>,
 }
 
-impl<N: Real, R: DimMin<C>, C: Dim> Copy for LU<N, R, C>
+impl<N: Complex, R: DimMin<C>, C: Dim> Copy for LU<N, R, C>
 where
     DefaultAllocator: Allocator<N, R, C> + Allocator<(usize, usize), DimMinimum<R, C>>,
     MatrixMN<N, R, C>: Copy,
@@ -49,7 +49,7 @@ where
 /// Performs a LU decomposition to overwrite `out` with the inverse of `matrix`.
 ///
 /// If `matrix` is not invertible, `false` is returned and `out` may contain invalid data.
-pub fn try_invert_to<N: Real, D: Dim, S>(
+pub fn try_invert_to<N: Complex, D: Dim, S>(
     mut matrix: MatrixN<N, D>,
     out: &mut Matrix<N, D, D, S>,
 ) -> bool
@@ -66,7 +66,7 @@ where
     out.fill_with_identity();
 
     for i in 0..dim {
-        let piv = matrix.slice_range(i.., i).iamax() + i;
+        let piv = matrix.slice_range(i.., i).icamax() + i;
         let diag = matrix[(piv, i)];
 
         if diag.is_zero() {
@@ -86,7 +86,7 @@ where
     matrix.solve_upper_triangular_mut(out)
 }
 
-impl<N: Real, R: DimMin<C>, C: Dim> LU<N, R, C>
+impl<N: Complex, R: DimMin<C>, C: Dim> LU<N, R, C>
 where DefaultAllocator: Allocator<N, R, C> + Allocator<(usize, usize), DimMinimum<R, C>>
 {
     /// Computes the LU decomposition with partial (row) pivoting of `matrix`.
@@ -101,7 +101,7 @@ where DefaultAllocator: Allocator<N, R, C> + Allocator<(usize, usize), DimMinimu
         }
 
         for i in 0..min_nrows_ncols.value() {
-            let piv = matrix.slice_range(i.., i).iamax() + i;
+            let piv = matrix.slice_range(i.., i).icamax() + i;
             let diag = matrix[(piv, i)];
 
             if diag.is_zero() {
@@ -197,7 +197,7 @@ where DefaultAllocator: Allocator<N, R, C> + Allocator<(usize, usize), DimMinimu
     }
 }
 
-impl<N: Real, D: DimMin<D, Output = D>> LU<N, D, D>
+impl<N: Complex, D: DimMin<D, Output = D>> LU<N, D, D>
 where DefaultAllocator: Allocator<N, D, D> + Allocator<(usize, usize), D>
 {
     /// Solves the linear system `self * x = b`, where `x` is the unknown to be determined.
@@ -368,7 +368,7 @@ pub fn gauss_step_swap<N, R: Dim, C: Dim, S>(
     }
 }
 
-impl<N: Real, R: DimMin<C>, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S>
+impl<N: Complex, R: DimMin<C>, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S>
 where DefaultAllocator: Allocator<N, R, C> + Allocator<(usize, usize), DimMinimum<R, C>>
 {
     /// Computes the LU decomposition with partial (row) pivoting of `matrix`.
@@ -1,7 +1,7 @@
 #[cfg(feature = "serde-serialize")]
 use serde::{Deserialize, Serialize};
 
-use alga::general::Real;
+use alga::general::Complex;
 use allocator::{Allocator, Reallocator};
 use base::{DefaultAllocator, Matrix, MatrixMN, MatrixN, Unit, VectorN};
 use constraint::{SameNumberOfRows, ShapeConstraint};
@@ -32,21 +32,21 @@ use linalg::householder;
 ))
 )]
 #[derive(Clone, Debug)]
-pub struct QR<N: Real, R: DimMin<C>, C: Dim>
+pub struct QR<N: Complex, R: DimMin<C>, C: Dim>
 where DefaultAllocator: Allocator<N, R, C> + Allocator<N, DimMinimum<R, C>>
 {
     qr: MatrixMN<N, R, C>,
     diag: VectorN<N, DimMinimum<R, C>>,
 }
 
-impl<N: Real, R: DimMin<C>, C: Dim> Copy for QR<N, R, C>
+impl<N: Complex, R: DimMin<C>, C: Dim> Copy for QR<N, R, C>
 where
     DefaultAllocator: Allocator<N, R, C> + Allocator<N, DimMinimum<R, C>>,
     MatrixMN<N, R, C>: Copy,
     VectorN<N, DimMinimum<R, C>>: Copy,
 {}
 
-impl<N: Real, R: DimMin<C>, C: Dim> QR<N, R, C>
+impl<N: Complex, R: DimMin<C>, C: Dim> QR<N, R, C>
 where DefaultAllocator: Allocator<N, R, C> + Allocator<N, R> + Allocator<N, DimMinimum<R, C>>
 {
     /// Computes the QR decomposition using householder reflections.
@@ -162,7 +162,7 @@ where DefaultAllocator: Allocator<N, R, C> + Allocator<N, R> + Allocator<N, DimM
     }
 }
 
-impl<N: Real, D: DimMin<D, Output = D>> QR<N, D, D>
+impl<N: Complex, D: DimMin<D, Output = D>> QR<N, D, D>
 where DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>
 {
     /// Solves the linear system `self * x = b`, where `x` is the unknown to be determined.
@@ -294,7 +294,7 @@ where DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>
     // }
 }
 
-impl<N: Real, R: DimMin<C>, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S>
+impl<N: Complex, R: DimMin<C>, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S>
 where DefaultAllocator: Allocator<N, R, C> + Allocator<N, R> + Allocator<N, DimMinimum<R, C>>
 {
     /// Computes the QR decomposition of this matrix.
@ -1,8 +1,9 @@
|
|||||||
#[cfg(feature = "serde-serialize")]
|
#[cfg(feature = "serde-serialize")]
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
use alga::general::Real;
|
use approx::AbsDiffEq;
|
||||||
use num_complex::Complex;
|
use alga::general::{Complex, Real};
|
||||||
|
use num_complex::Complex as NumComplex;
|
||||||
use std::cmp;
|
use std::cmp;
|
||||||
|
|
||||||
use allocator::Allocator;
|
use allocator::Allocator;
|
||||||
@ -14,6 +15,7 @@ use constraint::{DimEq, ShapeConstraint};
|
|||||||
use geometry::{Reflection, UnitComplex};
|
use geometry::{Reflection, UnitComplex};
|
||||||
use linalg::householder;
|
use linalg::householder;
|
||||||
use linalg::Hessenberg;
|
use linalg::Hessenberg;
|
||||||
|
use linalg::givens::GivensRotation;
|
||||||
|
|
||||||
/// Real Schur decomposition of a square matrix.
|
/// Real Schur decomposition of a square matrix.
|
||||||
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
|
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
|
||||||
@ -32,20 +34,20 @@ use linalg::Hessenberg;
|
|||||||
))
|
))
|
||||||
)]
|
)]
|
||||||
#[derive(Clone, Debug)]
|
#[derive(Clone, Debug)]
|
||||||
pub struct RealSchur<N: Real, D: Dim>
|
pub struct RealSchur<N: Complex, D: Dim>
|
||||||
where DefaultAllocator: Allocator<N, D, D>
|
where DefaultAllocator: Allocator<N, D, D>
|
||||||
{
|
{
|
||||||
q: MatrixN<N, D>,
|
q: MatrixN<N, D>,
|
||||||
t: MatrixN<N, D>,
|
t: MatrixN<N, D>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<N: Real, D: Dim> Copy for RealSchur<N, D>
|
impl<N: Complex, D: Dim> Copy for RealSchur<N, D>
|
||||||
where
|
where
|
||||||
DefaultAllocator: Allocator<N, D, D>,
|
DefaultAllocator: Allocator<N, D, D>,
|
||||||
MatrixN<N, D>: Copy,
|
MatrixN<N, D>: Copy,
|
||||||
{}
|
{}
|
||||||
|
|
||||||
impl<N: Real, D: Dim> RealSchur<N, D>
|
impl<N: Complex, D: Dim> RealSchur<N, D>
|
||||||
where
|
where
|
||||||
D: DimSub<U1>, // For Hessenberg.
|
D: DimSub<U1>, // For Hessenberg.
|
||||||
ShapeConstraint: DimEq<Dynamic, DimDiff<D, U1>>, // For Hessenberg.
|
ShapeConstraint: DimEq<Dynamic, DimDiff<D, U1>>, // For Hessenberg.
|
||||||
@ -56,7 +58,7 @@ where
|
|||||||
{
|
{
|
||||||
/// Computes the Schur decomposition of a square matrix.
|
/// Computes the Schur decomposition of a square matrix.
|
||||||
pub fn new(m: MatrixN<N, D>) -> Self {
|
pub fn new(m: MatrixN<N, D>) -> Self {
|
||||||
Self::try_new(m, N::default_epsilon(), 0).unwrap()
|
Self::try_new(m, N::Real::default_epsilon(), 0).unwrap()
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Attempts to compute the Schur decomposition of a square matrix.
|
/// Attempts to compute the Schur decomposition of a square matrix.
|
||||||
@ -70,7 +72,7 @@ where
|
|||||||
/// * `max_niter` − maximum total number of iterations performed by the algorithm. If this
|
/// * `max_niter` − maximum total number of iterations performed by the algorithm. If this
|
||||||
/// number of iteration is exceeded, `None` is returned. If `niter == 0`, then the algorithm
|
/// number of iteration is exceeded, `None` is returned. If `niter == 0`, then the algorithm
|
||||||
/// continues indefinitely until convergence.
|
/// continues indefinitely until convergence.
|
||||||
pub fn try_new(m: MatrixN<N, D>, eps: N, max_niter: usize) -> Option<Self> {
|
pub fn try_new(m: MatrixN<N, D>, eps: N::Real, max_niter: usize) -> Option<Self> {
|
||||||
let mut work = unsafe { VectorN::new_uninitialized_generic(m.data.shape().0, U1) };
|
let mut work = unsafe { VectorN::new_uninitialized_generic(m.data.shape().0, U1) };
|
||||||
|
|
||||||
Self::do_decompose(m, &mut work, eps, max_niter, true).map(|(q, t)| RealSchur {
|
Self::do_decompose(m, &mut work, eps, max_niter, true).map(|(q, t)| RealSchur {
|
||||||
@ -82,7 +84,7 @@ where
|
|||||||
fn do_decompose(
|
fn do_decompose(
|
||||||
mut m: MatrixN<N, D>,
|
mut m: MatrixN<N, D>,
|
||||||
work: &mut VectorN<N, D>,
|
work: &mut VectorN<N, D>,
|
||||||
eps: N,
|
eps: N::Real,
|
||||||
max_niter: usize,
|
max_niter: usize,
|
||||||
compute_q: bool,
|
compute_q: bool,
|
||||||
) -> Option<(Option<MatrixN<N, D>>, MatrixN<N, D>)>
|
) -> Option<(Option<MatrixN<N, D>>, MatrixN<N, D>)>
|
||||||
@ -111,8 +113,8 @@ where
|
|||||||
return decompose_2x2(m, compute_q);
|
return decompose_2x2(m, compute_q);
|
||||||
}
|
}
|
||||||
|
|
||||||
let amax_m = m.amax();
|
let amax_m = m.camax();
|
||||||
m /= amax_m;
|
m.unscale_mut(amax_m);
|
||||||
|
|
||||||
let hess = Hessenberg::new_with_workspace(m, work);
|
let hess = Hessenberg::new_with_workspace(m, work);
|
||||||
let mut q;
|
let mut q;
|
||||||
@ -259,7 +261,7 @@ where
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
t *= amax_m;
|
t.scale_mut(amax_m);
|
||||||
|
|
||||||
Some((q, t))
|
Some((q, t))
|
||||||
}
|
}
|
||||||
@ -289,8 +291,9 @@ where
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Computes the complex eigenvalues of the decomposed matrix.
|
/// Computes the complex eigenvalues of the decomposed matrix.
|
||||||
fn do_complex_eigenvalues(t: &MatrixN<N, D>, out: &mut VectorN<Complex<N>, D>)
|
fn do_complex_eigenvalues(t: &MatrixN<N, D>, out: &mut VectorN<NumComplex<N>, D>)
|
||||||
where DefaultAllocator: Allocator<Complex<N>, D> {
|
where N: Real,
|
||||||
|
DefaultAllocator: Allocator<NumComplex<N>, D> {
|
||||||
let dim = t.nrows();
|
let dim = t.nrows();
|
||||||
let mut m = 0;
|
let mut m = 0;
|
||||||
|
|
||||||
@ -298,7 +301,7 @@ where
|
|||||||
let n = m + 1;
|
let n = m + 1;
|
||||||
|
|
||||||
if t[(n, m)].is_zero() {
|
if t[(n, m)].is_zero() {
|
||||||
out[m] = Complex::new(t[(m, m)], N::zero());
|
out[m] = NumComplex::new(t[(m, m)], N::zero());
|
||||||
m += 1;
|
m += 1;
|
||||||
} else {
|
} else {
|
||||||
// Solve the 2x2 eigenvalue subproblem.
|
// Solve the 2x2 eigenvalue subproblem.
|
||||||
@ -313,21 +316,21 @@ where
|
|||||||
|
|
||||||
// All 2x2 blocks have negative discriminant because we already decoupled those
|
// All 2x2 blocks have negative discriminant because we already decoupled those
|
||||||
// with positive eigenvalues..
|
// with positive eigenvalues..
|
||||||
let sqrt_discr = Complex::new(N::zero(), (-discr).sqrt());
|
let sqrt_discr = NumComplex::new(N::zero(), (-discr).sqrt());
|
||||||
|
|
||||||
out[m] = Complex::new(tra * ::convert(0.5), N::zero()) + sqrt_discr;
|
out[m] = NumComplex::new(tra * ::convert(0.5), N::zero()) + sqrt_discr;
|
||||||
out[m + 1] = Complex::new(tra * ::convert(0.5), N::zero()) - sqrt_discr;
|
out[m + 1] = NumComplex::new(tra * ::convert(0.5), N::zero()) - sqrt_discr;
|
||||||
|
|
||||||
m += 2;
|
m += 2;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if m == dim - 1 {
|
if m == dim - 1 {
|
||||||
out[m] = Complex::new(t[(m, m)], N::zero());
|
out[m] = NumComplex::new(t[(m, m)], N::zero());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn delimit_subproblem(t: &mut MatrixN<N, D>, eps: N, end: usize) -> (usize, usize)
|
fn delimit_subproblem(t: &mut MatrixN<N, D>, eps: N::Real, end: usize) -> (usize, usize)
|
||||||
where
|
where
|
||||||
D: DimSub<U1>,
|
D: DimSub<U1>,
|
||||||
DefaultAllocator: Allocator<N, DimDiff<D, U1>>,
|
DefaultAllocator: Allocator<N, DimDiff<D, U1>>,
|
||||||
@ -337,7 +340,7 @@ where
|
|||||||
while n > 0 {
|
while n > 0 {
|
||||||
let m = n - 1;
|
let m = n - 1;
|
||||||
|
|
||||||
if t[(n, m)].abs() <= eps * (t[(n, n)].abs() + t[(m, m)].abs()) {
|
if t[(n, m)].modulus() <= eps * (t[(n, n)].modulus() + t[(m, m)].modulus()) {
|
||||||
t[(n, m)] = N::zero();
|
t[(n, m)] = N::zero();
|
||||||
} else {
|
} else {
|
||||||
break;
|
break;
|
||||||
@ -356,7 +359,7 @@ where
|
|||||||
|
|
||||||
let off_diag = t[(new_start, m)];
|
let off_diag = t[(new_start, m)];
|
||||||
if off_diag.is_zero()
|
if off_diag.is_zero()
|
||||||
|| off_diag.abs() <= eps * (t[(new_start, new_start)].abs() + t[(m, m)].abs())
|
|| off_diag.modulus() <= eps * (t[(new_start, new_start)].modulus() + t[(m, m)].modulus())
|
||||||
{
|
{
|
||||||
t[(new_start, m)] = N::zero();
|
t[(new_start, m)] = N::zero();
|
||||||
break;
|
break;
|
||||||
@ -387,15 +390,16 @@ where
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Computes the complex eigenvalues of the decomposed matrix.
|
/// Computes the complex eigenvalues of the decomposed matrix.
|
||||||
pub fn complex_eigenvalues(&self) -> VectorN<Complex<N>, D>
|
pub fn complex_eigenvalues(&self) -> VectorN<NumComplex<N>, D>
|
||||||
where DefaultAllocator: Allocator<Complex<N>, D> {
|
where N: Real,
|
||||||
|
DefaultAllocator: Allocator<NumComplex<N>, D> {
|
||||||
let mut out = unsafe { VectorN::new_uninitialized_generic(self.t.data.shape().0, U1) };
|
let mut out = unsafe { VectorN::new_uninitialized_generic(self.t.data.shape().0, U1) };
|
||||||
Self::do_complex_eigenvalues(&self.t, &mut out);
|
Self::do_complex_eigenvalues(&self.t, &mut out);
|
||||||
out
|
out
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn decompose_2x2<N: Real, D: Dim>(
|
fn decompose_2x2<N: Complex, D: Dim>(
|
||||||
mut m: MatrixN<N, D>,
|
mut m: MatrixN<N, D>,
|
||||||
compute_q: bool,
|
compute_q: bool,
|
||||||
) -> Option<(Option<MatrixN<N, D>>, MatrixN<N, D>)>
|
) -> Option<(Option<MatrixN<N, D>>, MatrixN<N, D>)>
|
||||||
@ -412,13 +416,12 @@ where
|
|||||||
rot.rotate_rows(&mut m);
|
rot.rotate_rows(&mut m);
|
||||||
|
|
||||||
if compute_q {
|
if compute_q {
|
||||||
let c = rot.into_inner();
|
|
||||||
// XXX: we have to build the matrix manually because
|
// XXX: we have to build the matrix manually because
|
||||||
// rot.to_rotation_matrix().unwrap() causes an ICE.
|
// rot.to_rotation_matrix().unwrap() causes an ICE.
|
||||||
q = Some(MatrixN::from_column_slice_generic(
|
q = Some(MatrixN::from_column_slice_generic(
|
||||||
dim,
|
dim,
|
||||||
dim,
|
dim,
|
||||||
&[c.re, c.im, -c.im, c.re],
|
&[rot.c(), rot.s(), -rot.s().conjugate(), rot.c().conjugate()],
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -432,7 +435,7 @@ where
|
|||||||
Some((q, m))
|
Some((q, m))
|
||||||
}
|
}
|
||||||
|
|
||||||
fn compute_2x2_eigvals<N: Real, S: Storage<N, U2, U2>>(
|
fn compute_2x2_eigvals<N: Complex, S: Storage<N, U2, U2>>(
|
||||||
m: &SquareMatrix<N, U2, S>,
|
m: &SquareMatrix<N, U2, S>,
|
||||||
) -> Option<(N, N)> {
|
) -> Option<(N, N)> {
|
||||||
// Solve the 2x2 eigenvalue subproblem.
|
// Solve the 2x2 eigenvalue subproblem.
|
||||||
@ -447,13 +450,10 @@ fn compute_2x2_eigvals<N: Real, S: Storage<N, U2, U2>>(
|
|||||||
let val = (h00 - h11) * ::convert(0.5);
|
let val = (h00 - h11) * ::convert(0.5);
|
||||||
let discr = h10 * h01 + val * val;
|
let discr = h10 * h01 + val * val;
|
||||||
|
|
||||||
if discr >= N::zero() {
|
discr.try_sqrt().map(|sqrt_discr| {
|
||||||
let sqrt_discr = discr.sqrt();
|
|
||||||
let half_tra = (h00 + h11) * ::convert(0.5);
|
let half_tra = (h00 + h11) * ::convert(0.5);
|
||||||
Some((half_tra + sqrt_discr, half_tra - sqrt_discr))
|
(half_tra + sqrt_discr, half_tra - sqrt_discr)
|
||||||
} else {
|
})
|
||||||
None
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Computes the 2x2 transformation that upper-triangulates a 2x2 matrix with real eigenvalues.
|
// Computes the 2x2 transformation that upper-triangulates a 2x2 matrix with real eigenvalues.
|
||||||
@ -461,9 +461,9 @@ fn compute_2x2_eigvals<N: Real, S: Storage<N, U2, U2>>(
|
|||||||
///
|
///
|
||||||
/// Returns `None` if the matrix has complex eigenvalues, or is upper-triangular. In both case,
|
/// Returns `None` if the matrix has complex eigenvalues, or is upper-triangular. In both case,
|
||||||
/// the basis is the identity.
|
/// the basis is the identity.
|
||||||
fn compute_2x2_basis<N: Real, S: Storage<N, U2, U2>>(
|
fn compute_2x2_basis<N: Complex, S: Storage<N, U2, U2>>(
|
||||||
m: &SquareMatrix<N, U2, S>,
|
m: &SquareMatrix<N, U2, S>,
|
||||||
) -> Option<UnitComplex<N>> {
|
) -> Option<GivensRotation<N>> {
|
||||||
let h10 = m[(1, 0)];
|
let h10 = m[(1, 0)];
|
||||||
|
|
||||||
if h10.is_zero() {
|
if h10.is_zero() {
|
||||||
@ -477,19 +477,17 @@ fn compute_2x2_basis<N: Real, S: Storage<N, U2, U2>>(
|
|||||||
// NOTE: Choose the one that yields a larger x component.
|
// NOTE: Choose the one that yields a larger x component.
|
||||||
// This is necessary for numerical stability of the normalization of the complex
|
// This is necessary for numerical stability of the normalization of the complex
|
||||||
// number.
|
// number.
|
||||||
let basis = if x1.abs() > x2.abs() {
|
if x1.modulus() > x2.modulus() {
|
||||||
Complex::new(x1, -h10)
|
Some(GivensRotation::new(x1, -h10))
|
||||||
} else {
|
} else {
|
||||||
Complex::new(x2, -h10)
|
Some(GivensRotation::new(x2, -h10))
|
||||||
};
|
}
|
||||||
|
|
||||||
Some(UnitComplex::from_complex(basis))
|
|
||||||
} else {
|
} else {
|
||||||
None
|
None
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<N: Real, D: Dim, S: Storage<N, D, D>> SquareMatrix<N, D, S>
|
impl<N: Complex, D: Dim, S: Storage<N, D, D>> SquareMatrix<N, D, S>
|
||||||
where
|
where
|
||||||
D: DimSub<U1>, // For Hessenberg.
|
D: DimSub<U1>, // For Hessenberg.
|
||||||
ShapeConstraint: DimEq<Dynamic, DimDiff<D, U1>>, // For Hessenberg.
|
ShapeConstraint: DimEq<Dynamic, DimDiff<D, U1>>, // For Hessenberg.
|
||||||
@ -514,7 +512,7 @@ where
|
|||||||
/// * `max_niter` − maximum total number of iterations performed by the algorithm. If this
|
/// * `max_niter` − maximum total number of iterations performed by the algorithm. If this
|
||||||
/// number of iteration is exceeded, `None` is returned. If `niter == 0`, then the algorithm
|
/// number of iteration is exceeded, `None` is returned. If `niter == 0`, then the algorithm
|
||||||
/// continues indefinitely until convergence.
|
/// continues indefinitely until convergence.
|
||||||
pub fn try_real_schur(self, eps: N, max_niter: usize) -> Option<RealSchur<N, D>> {
|
pub fn try_real_schur(self, eps: N::Real, max_niter: usize) -> Option<RealSchur<N, D>> {
|
||||||
RealSchur::try_new(self.into_owned(), eps, max_niter)
|
RealSchur::try_new(self.into_owned(), eps, max_niter)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -546,7 +544,7 @@ where
|
|||||||
let schur = RealSchur::do_decompose(
|
let schur = RealSchur::do_decompose(
|
||||||
self.clone_owned(),
|
self.clone_owned(),
|
||||||
&mut work,
|
&mut work,
|
||||||
N::default_epsilon(),
|
N::Real::default_epsilon(),
|
||||||
0,
|
0,
|
||||||
false,
|
false,
|
||||||
)
|
)
|
||||||
@ -559,9 +557,10 @@ where
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Computes the eigenvalues of this matrix.
|
/// Computes the eigenvalues of this matrix.
|
||||||
pub fn complex_eigenvalues(&self) -> VectorN<Complex<N>, D>
|
pub fn complex_eigenvalues(&self) -> VectorN<NumComplex<N>, D>
|
||||||
// FIXME: add balancing?
|
// FIXME: add balancing?
|
||||||
where DefaultAllocator: Allocator<Complex<N>, D> {
|
where N: Real,
|
||||||
|
DefaultAllocator: Allocator<NumComplex<N>, D> {
|
||||||
let dim = self.data.shape().0;
|
let dim = self.data.shape().0;
|
||||||
let mut work = unsafe { VectorN::new_uninitialized_generic(dim, U1) };
|
let mut work = unsafe { VectorN::new_uninitialized_generic(dim, U1) };
|
||||||
|
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
use alga::general::Real;
|
use alga::general::Complex;
|
||||||
|
|
||||||
use base::allocator::Allocator;
|
use base::allocator::Allocator;
|
||||||
use base::constraint::{SameNumberOfRows, ShapeConstraint};
|
use base::constraint::{SameNumberOfRows, ShapeConstraint};
|
||||||
@ -6,7 +6,7 @@ use base::dimension::{Dim, U1};
|
|||||||
use base::storage::{Storage, StorageMut};
|
use base::storage::{Storage, StorageMut};
|
||||||
use base::{DefaultAllocator, Matrix, MatrixMN, SquareMatrix, Vector};
|
use base::{DefaultAllocator, Matrix, MatrixMN, SquareMatrix, Vector};
|
||||||
|
|
||||||
impl<N: Real, D: Dim, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
|
impl<N: Complex, D: Dim, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
|
||||||
/// Computes the solution of the linear system `self . x = b` where `x` is the unknown and only
|
/// Computes the solution of the linear system `self . x = b` where `x` is the unknown and only
|
||||||
/// the lower-triangular part of `self` (including the diagonal) is considered not-zero.
|
/// the lower-triangular part of `self` (including the diagonal) is considered not-zero.
|
||||||
#[inline]
|
#[inline]
|
||||||
|
@ -1,17 +1,19 @@
|
|||||||
#[cfg(feature = "serde-serialize")]
|
#[cfg(feature = "serde-serialize")]
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
use num_complex::Complex;
|
use num::Zero;
|
||||||
|
use num_complex::Complex as NumComplex;
|
||||||
|
use approx::AbsDiffEq;
|
||||||
use std::ops::MulAssign;
|
use std::ops::MulAssign;
|
||||||
|
|
||||||
use alga::general::Real;
|
use alga::general::Complex;
|
||||||
use allocator::Allocator;
|
use allocator::Allocator;
|
||||||
use base::{DefaultAllocator, Matrix2, MatrixN, SquareMatrix, Vector2, VectorN};
|
use base::{DefaultAllocator, Matrix2, MatrixN, SquareMatrix, Vector2, VectorN};
|
||||||
use dimension::{Dim, DimDiff, DimSub, U1, U2};
|
use dimension::{Dim, DimDiff, DimSub, U1, U2};
|
||||||
use storage::Storage;
|
use storage::Storage;
|
||||||
|
|
||||||
use geometry::UnitComplex;
|
use geometry::UnitComplex;
|
||||||
use linalg::givens;
|
use linalg::givens::GivensRotation;
|
||||||
use linalg::SymmetricTridiagonal;
|
use linalg::SymmetricTridiagonal;
|
||||||
|
|
||||||
/// Eigendecomposition of a symmetric matrix.
|
/// Eigendecomposition of a symmetric matrix.
|
||||||
@ -35,7 +37,7 @@ use linalg::SymmetricTridiagonal;
|
|||||||
))
|
))
|
||||||
)]
|
)]
|
||||||
#[derive(Clone, Debug)]
|
#[derive(Clone, Debug)]
|
||||||
pub struct SymmetricEigen<N: Real, D: Dim>
|
pub struct SymmetricEigen<N: Complex, D: Dim>
|
||||||
where DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>
|
where DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>
|
||||||
{
|
{
|
||||||
/// The eigenvectors of the decomposed matrix.
|
/// The eigenvectors of the decomposed matrix.
|
||||||
@ -45,14 +47,14 @@ where DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>
|
|||||||
pub eigenvalues: VectorN<N, D>,
|
pub eigenvalues: VectorN<N, D>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<N: Real, D: Dim> Copy for SymmetricEigen<N, D>
|
impl<N: Complex, D: Dim> Copy for SymmetricEigen<N, D>
|
||||||
where
|
where
|
||||||
DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>,
|
DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>,
|
||||||
MatrixN<N, D>: Copy,
|
MatrixN<N, D>: Copy,
|
||||||
VectorN<N, D>: Copy,
|
VectorN<N, D>: Copy,
|
||||||
{}
|
{}
|
||||||
|
|
||||||
impl<N: Real, D: Dim> SymmetricEigen<N, D>
|
impl<N: Complex, D: Dim> SymmetricEigen<N, D>
|
||||||
where DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>
|
where DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>
|
||||||
{
|
{
|
||||||
/// Computes the eigendecomposition of the given symmetric matrix.
|
/// Computes the eigendecomposition of the given symmetric matrix.
|
||||||
@ -63,7 +65,7 @@ where DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>
|
|||||||
D: DimSub<U1>,
|
D: DimSub<U1>,
|
||||||
DefaultAllocator: Allocator<N, DimDiff<D, U1>>,
|
DefaultAllocator: Allocator<N, DimDiff<D, U1>>,
|
||||||
{
|
{
|
||||||
Self::try_new(m, N::default_epsilon(), 0).unwrap()
|
Self::try_new(m, N::Real::default_epsilon(), 0).unwrap()
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Computes the eigendecomposition of the given symmetric matrix with user-specified
|
/// Computes the eigendecomposition of the given symmetric matrix with user-specified
|
||||||
@ -77,7 +79,7 @@ where DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>
|
|||||||
/// * `max_niter` − maximum total number of iterations performed by the algorithm. If this
|
/// * `max_niter` − maximum total number of iterations performed by the algorithm. If this
|
||||||
/// number of iteration is exceeded, `None` is returned. If `niter == 0`, then the algorithm
|
/// number of iteration is exceeded, `None` is returned. If `niter == 0`, then the algorithm
|
||||||
/// continues indefinitely until convergence.
|
/// continues indefinitely until convergence.
|
||||||
pub fn try_new(m: MatrixN<N, D>, eps: N, max_niter: usize) -> Option<Self>
|
pub fn try_new(m: MatrixN<N, D>, eps: N::Real, max_niter: usize) -> Option<Self>
|
||||||
where
|
where
|
||||||
D: DimSub<U1>,
|
D: DimSub<U1>,
|
||||||
DefaultAllocator: Allocator<N, DimDiff<D, U1>>,
|
DefaultAllocator: Allocator<N, DimDiff<D, U1>>,
|
||||||
@ -91,7 +93,7 @@ where DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>
|
|||||||
fn do_decompose(
|
fn do_decompose(
|
||||||
mut m: MatrixN<N, D>,
|
mut m: MatrixN<N, D>,
|
||||||
eigenvectors: bool,
|
eigenvectors: bool,
|
||||||
eps: N,
|
eps: N::Real,
|
||||||
max_niter: usize,
|
max_niter: usize,
|
||||||
) -> Option<(VectorN<N, D>, Option<MatrixN<N, D>>)>
|
) -> Option<(VectorN<N, D>, Option<MatrixN<N, D>>)>
|
||||||
where
|
where
|
||||||
@ -103,11 +105,10 @@ where DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>
|
|||||||
"Unable to compute the eigendecomposition of a non-square matrix."
|
"Unable to compute the eigendecomposition of a non-square matrix."
|
||||||
);
|
);
|
||||||
let dim = m.nrows();
|
let dim = m.nrows();
|
||||||
|
let m_amax = m.camax();
|
||||||
let m_amax = m.amax();
|
|
||||||
|
|
||||||
if !m_amax.is_zero() {
|
if !m_amax.is_zero() {
|
||||||
m /= m_amax;
|
m.unscale_mut(m_amax);
|
||||||
}
|
}
|
||||||
|
|
||||||
let (mut q, mut diag, mut off_diag);
|
let (mut q, mut diag, mut off_diag);
|
||||||
@ -125,7 +126,7 @@ where DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>
|
|||||||
}
|
}
|
||||||
|
|
||||||
if dim == 1 {
|
if dim == 1 {
|
||||||
diag *= m_amax;
|
diag.scale_mut(m_amax);
|
||||||
return Some((diag, q));
|
return Some((diag, q));
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -147,7 +148,7 @@ where DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>
|
|||||||
for i in start..n {
|
for i in start..n {
|
||||||
let j = i + 1;
|
let j = i + 1;
|
||||||
|
|
||||||
if let Some((rot, norm)) = givens::cancel_y(&v) {
|
if let Some((rot, norm)) = GivensRotation::cancel_y(&v) {
|
||||||
if i > start {
|
if i > start {
|
||||||
// Not the first iteration.
|
// Not the first iteration.
|
||||||
off_diag[i - 1] = norm;
|
off_diag[i - 1] = norm;
|
||||||
@ -157,9 +158,9 @@ where DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>
|
|||||||
let mjj = diag[j];
|
let mjj = diag[j];
|
||||||
let mij = off_diag[i];
|
let mij = off_diag[i];
|
||||||
|
|
||||||
let cc = rot.cos_angle() * rot.cos_angle();
|
let cc = rot.c() * rot.c();
|
||||||
let ss = rot.sin_angle() * rot.sin_angle();
|
let ss = rot.s() * rot.s();
|
||||||
let cs = rot.cos_angle() * rot.sin_angle();
|
let cs = rot.c() * rot.s();
|
||||||
|
|
||||||
let b = cs * ::convert(2.0) * mij;
|
let b = cs * ::convert(2.0) * mij;
|
||||||
|
|
||||||
@ -169,8 +170,8 @@ where DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>
|
|||||||
|
|
||||||
if i != n - 1 {
|
if i != n - 1 {
|
||||||
v.x = off_diag[i];
|
v.x = off_diag[i];
|
||||||
v.y = -rot.sin_angle() * off_diag[i + 1];
|
v.y = -rot.s() * off_diag[i + 1];
|
||||||
off_diag[i + 1] *= rot.cos_angle();
|
off_diag[i + 1] *= rot.c();
|
||||||
}
|
}
|
||||||
|
|
||||||
if let Some(ref mut q) = q {
|
if let Some(ref mut q) = q {
|
||||||
@ -181,7 +182,7 @@ where DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if off_diag[m].abs() <= eps * (diag[m].abs() + diag[n].abs()) {
|
if off_diag[m].modulus() <= eps * (diag[m].modulus() + diag[n].modulus()) {
|
||||||
end -= 1;
|
end -= 1;
|
||||||
}
|
}
|
||||||
} else if subdim == 2 {
|
} else if subdim == 2 {
|
||||||
@ -198,8 +199,7 @@ where DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>
|
|||||||
diag[start + 1] = eigvals[1];
|
diag[start + 1] = eigvals[1];
|
||||||
|
|
||||||
if let Some(ref mut q) = q {
|
if let Some(ref mut q) = q {
|
||||||
if let Some(basis) = basis.try_normalize(eps) {
|
if let Some(rot) = GivensRotation::try_new(basis.x, basis.y, eps) {
|
||||||
let rot = UnitComplex::new_unchecked(Complex::new(basis.x, basis.y));
|
|
||||||
rot.rotate_rows(&mut q.fixed_columns_mut::<U2>(start));
|
rot.rotate_rows(&mut q.fixed_columns_mut::<U2>(start));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -219,7 +219,7 @@ where DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
diag *= m_amax;
|
diag.scale_mut(m_amax);
|
||||||
|
|
||||||
Some((diag, q))
|
Some((diag, q))
|
||||||
}
|
}
|
||||||
@ -228,7 +228,7 @@ where DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>
|
|||||||
diag: &VectorN<N, D>,
|
diag: &VectorN<N, D>,
|
||||||
off_diag: &mut VectorN<N, DimDiff<D, U1>>,
|
off_diag: &mut VectorN<N, DimDiff<D, U1>>,
|
||||||
end: usize,
|
end: usize,
|
||||||
eps: N,
|
eps: N::Real,
|
||||||
) -> (usize, usize)
|
) -> (usize, usize)
|
||||||
where
|
where
|
||||||
D: DimSub<U1>,
|
D: DimSub<U1>,
|
||||||
@ -239,7 +239,7 @@ where DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>
|
|||||||
while n > 0 {
|
while n > 0 {
|
||||||
let m = n - 1;
|
let m = n - 1;
|
||||||
|
|
||||||
if off_diag[m].abs() > eps * (diag[n].abs() + diag[m].abs()) {
|
if off_diag[m].modulus() > eps * (diag[n].modulus() + diag[m].modulus()) {
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -255,7 +255,7 @@ where DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>
|
|||||||
let m = new_start - 1;
|
let m = new_start - 1;
|
||||||
|
|
||||||
if off_diag[m].is_zero()
|
if off_diag[m].is_zero()
|
||||||
|| off_diag[m].abs() <= eps * (diag[new_start].abs() + diag[m].abs())
|
|| off_diag[m].modulus() <= eps * (diag[new_start].modulus() + diag[m].modulus())
|
||||||
{
|
{
|
||||||
off_diag[m] = N::zero();
|
off_diag[m] = N::zero();
|
||||||
break;
|
break;
|
||||||
@ -276,7 +276,7 @@ where DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>
|
|||||||
let val = self.eigenvalues[i];
|
let val = self.eigenvalues[i];
|
||||||
u_t.column_mut(i).mul_assign(val);
|
u_t.column_mut(i).mul_assign(val);
|
||||||
}
|
}
|
||||||
u_t.transpose_mut();
|
u_t.conjugate_transpose_mut();
|
||||||
&self.eigenvectors * u_t
|
&self.eigenvectors * u_t
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -287,7 +287,7 @@ where DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>
|
|||||||
/// The inputs are interpreted as the 2x2 matrix:
|
/// The inputs are interpreted as the 2x2 matrix:
|
||||||
/// tmm tmn
|
/// tmm tmn
|
||||||
/// tmn tnn
|
/// tmn tnn
|
||||||
pub fn wilkinson_shift<N: Real>(tmm: N, tnn: N, tmn: N) -> N {
|
pub fn wilkinson_shift<N: Complex>(tmm: N, tnn: N, tmn: N) -> N {
|
||||||
let sq_tmn = tmn * tmn;
|
let sq_tmn = tmn * tmn;
|
||||||
if !sq_tmn.is_zero() {
|
if !sq_tmn.is_zero() {
|
||||||
// We have the guarantee that the denominator won't be zero.
|
// We have the guarantee that the denominator won't be zero.
|
||||||
@ -303,7 +303,7 @@ pub fn wilkinson_shift<N: Real>(tmm: N, tnn: N, tmn: N) -> N {
|
|||||||
* Computations of eigenvalues for symmetric matrices.
|
* Computations of eigenvalues for symmetric matrices.
|
||||||
*
|
*
|
||||||
*/
|
*/
|
||||||
impl<N: Real, D: DimSub<U1>, S: Storage<N, D, D>> SquareMatrix<N, D, S>
|
impl<N: Complex, D: DimSub<U1>, S: Storage<N, D, D>> SquareMatrix<N, D, S>
|
||||||
where DefaultAllocator: Allocator<N, D, D> + Allocator<N, D> + Allocator<N, DimDiff<D, U1>>
|
where DefaultAllocator: Allocator<N, D, D> + Allocator<N, D> + Allocator<N, DimDiff<D, U1>>
|
||||||
{
|
{
|
||||||
/// Computes the eigendecomposition of this symmetric matrix.
|
/// Computes the eigendecomposition of this symmetric matrix.
|
||||||
@ -324,7 +324,7 @@ where DefaultAllocator: Allocator<N, D, D> + Allocator<N, D> + Allocator<N, DimD
|
|||||||
/// * `max_niter` − maximum total number of iterations performed by the algorithm. If this
|
/// * `max_niter` − maximum total number of iterations performed by the algorithm. If this
|
||||||
/// number of iteration is exceeded, `None` is returned. If `niter == 0`, then the algorithm
|
/// number of iteration is exceeded, `None` is returned. If `niter == 0`, then the algorithm
|
||||||
/// continues indefinitely until convergence.
|
/// continues indefinitely until convergence.
|
||||||
pub fn try_symmetric_eigen(self, eps: N, max_niter: usize) -> Option<SymmetricEigen<N, D>> {
|
pub fn try_symmetric_eigen(self, eps: N::Real, max_niter: usize) -> Option<SymmetricEigen<N, D>> {
|
||||||
SymmetricEigen::try_new(self.into_owned(), eps, max_niter)
|
SymmetricEigen::try_new(self.into_owned(), eps, max_niter)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -332,7 +332,7 @@ where DefaultAllocator: Allocator<N, D, D> + Allocator<N, D> + Allocator<N, DimD
|
|||||||
///
|
///
|
||||||
/// Only the lower-triangular part of the matrix is read.
|
/// Only the lower-triangular part of the matrix is read.
|
||||||
pub fn symmetric_eigenvalues(&self) -> VectorN<N, D> {
|
pub fn symmetric_eigenvalues(&self) -> VectorN<N, D> {
|
||||||
SymmetricEigen::do_decompose(self.clone_owned(), false, N::default_epsilon(), 0)
|
SymmetricEigen::do_decompose(self.clone_owned(), false, N::Real::default_epsilon(), 0)
|
||||||
.unwrap()
|
.unwrap()
|
||||||
.0
|
.0
|
||||||
}
|
}
|
||||||
|
@ -1,7 +1,7 @@
|
|||||||
#[cfg(feature = "serde-serialize")]
|
#[cfg(feature = "serde-serialize")]
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
use alga::general::Real;
|
use alga::general::Complex;
|
||||||
use allocator::Allocator;
|
use allocator::Allocator;
|
||||||
use base::{DefaultAllocator, MatrixMN, MatrixN, SquareMatrix, VectorN};
|
use base::{DefaultAllocator, MatrixMN, MatrixN, SquareMatrix, VectorN};
|
||||||
use dimension::{DimDiff, DimSub, U1};
|
use dimension::{DimDiff, DimSub, U1};
|
||||||
@ -30,21 +30,21 @@ use linalg::householder;
|
|||||||
))
|
))
|
||||||
)]
|
)]
|
||||||
#[derive(Clone, Debug)]
|
#[derive(Clone, Debug)]
|
||||||
pub struct SymmetricTridiagonal<N: Real, D: DimSub<U1>>
|
pub struct SymmetricTridiagonal<N: Complex, D: DimSub<U1>>
|
||||||
where DefaultAllocator: Allocator<N, D, D> + Allocator<N, DimDiff<D, U1>>
|
where DefaultAllocator: Allocator<N, D, D> + Allocator<N, DimDiff<D, U1>>
|
||||||
{
|
{
|
||||||
tri: MatrixN<N, D>,
|
tri: MatrixN<N, D>,
|
||||||
off_diagonal: VectorN<N, DimDiff<D, U1>>,
|
off_diagonal: VectorN<N, DimDiff<D, U1>>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<N: Real, D: DimSub<U1>> Copy for SymmetricTridiagonal<N, D>
|
impl<N: Complex, D: DimSub<U1>> Copy for SymmetricTridiagonal<N, D>
|
||||||
where
|
where
|
||||||
DefaultAllocator: Allocator<N, D, D> + Allocator<N, DimDiff<D, U1>>,
|
DefaultAllocator: Allocator<N, D, D> + Allocator<N, DimDiff<D, U1>>,
|
||||||
MatrixN<N, D>: Copy,
|
MatrixN<N, D>: Copy,
|
||||||
VectorN<N, DimDiff<D, U1>>: Copy,
|
VectorN<N, DimDiff<D, U1>>: Copy,
|
||||||
{}
|
{}
|
||||||
|
|
||||||
impl<N: Real, D: DimSub<U1>> SymmetricTridiagonal<N, D>
|
impl<N: Complex, D: DimSub<U1>> SymmetricTridiagonal<N, D>
|
||||||
where DefaultAllocator: Allocator<N, D, D> + Allocator<N, DimDiff<D, U1>>
|
where DefaultAllocator: Allocator<N, D, D> + Allocator<N, DimDiff<D, U1>>
|
||||||
{
|
{
|
||||||
/// Computes the tridiagonalization of the symmetric matrix `m`.
|
/// Computes the tridiagonalization of the symmetric matrix `m`.
|
||||||
@ -75,17 +75,18 @@ where DefaultAllocator: Allocator<N, D, D> + Allocator<N, DimDiff<D, U1>>
|
|||||||
if not_zero {
|
if not_zero {
|
||||||
let mut p = p.rows_range_mut(i..);
|
let mut p = p.rows_range_mut(i..);
|
||||||
|
|
||||||
p.gemv_symm(::convert(2.0), &m, &axis, N::zero());
|
p.gemv_symm(::convert(2.0), &m, &axis.conjugate(), N::zero());
|
||||||
let dot = axis.dot(&p);
|
let dot = axis.dot(&p);
|
||||||
p.axpy(-dot, &axis, N::one());
|
// p.axpy(-dot, &axis.conjugate(), N::one());
|
||||||
m.ger_symm(-N::one(), &p, &axis, N::one());
|
m.ger_symm(-N::one(), &p, &axis.conjugate(), N::one());
|
||||||
m.ger_symm(-N::one(), &axis, &p, N::one());
|
m.ger_symm(-N::one(), &axis, &p, N::one());
|
||||||
|
m.ger_symm(dot * ::convert(2.0), &axis, &axis.conjugate(), N::one());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
Self {
|
Self {
|
||||||
tri: m,
|
tri: m,
|
||||||
off_diagonal: off_diagonal,
|
off_diagonal,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -138,14 +139,14 @@ where DefaultAllocator: Allocator<N, D, D> + Allocator<N, DimDiff<D, U1>>
|
|||||||
|
|
||||||
for i in 0..self.off_diagonal.len() {
|
for i in 0..self.off_diagonal.len() {
|
||||||
self.tri[(i + 1, i)] = self.off_diagonal[i];
|
self.tri[(i + 1, i)] = self.off_diagonal[i];
|
||||||
self.tri[(i, i + 1)] = self.off_diagonal[i];
|
self.tri[(i, i + 1)] = self.off_diagonal[i].conjugate();
|
||||||
}
|
}
|
||||||
|
|
||||||
&q * self.tri * q.transpose()
|
&q * self.tri * q.conjugate_transpose()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<N: Real, D: DimSub<U1>, S: Storage<N, D, D>> SquareMatrix<N, D, S>
|
impl<N: Complex, D: DimSub<U1>, S: Storage<N, D, D>> SquareMatrix<N, D, S>
|
||||||
where DefaultAllocator: Allocator<N, D, D> + Allocator<N, DimDiff<D, U1>>
|
where DefaultAllocator: Allocator<N, D, D> + Allocator<N, DimDiff<D, U1>>
|
||||||
{
|
{
|
||||||
/// Computes the tridiagonalization of this symmetric matrix.
|
/// Computes the tridiagonalization of this symmetric matrix.
|
||||||
|
54
tests/core/helper.rs
Normal file
54
tests/core/helper.rs
Normal file
@ -0,0 +1,54 @@
|
|||||||
|
// This module implement several methods to fill some
|
||||||
|
// missing features of num-complex when it comes to randomness.
|
||||||
|
|
||||||
|
use quickcheck::{Arbitrary, Gen};
|
||||||
|
use rand::distributions::{Standard, Distribution};
|
||||||
|
use rand::Rng;
|
||||||
|
use num_complex::Complex;
|
||||||
|
use na::Real;
|
||||||
|
|
||||||
|
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
|
||||||
|
pub struct RandComplex<N>(pub Complex<N>);
|
||||||
|
|
||||||
|
impl<N: Arbitrary + Real> Arbitrary for RandComplex<N> {
|
||||||
|
#[inline]
|
||||||
|
fn arbitrary<G: Gen>(rng: &mut G) -> Self {
|
||||||
|
let im = Arbitrary::arbitrary(rng);
|
||||||
|
let re = Arbitrary::arbitrary(rng);
|
||||||
|
RandComplex(Complex::new(re, im))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<N: Real> Distribution<RandComplex<N>> for Standard
|
||||||
|
where
|
||||||
|
Standard: Distribution<N>,
|
||||||
|
{
|
||||||
|
#[inline]
|
||||||
|
fn sample<'a, G: Rng + ?Sized>(&self, rng: &'a mut G) -> RandComplex<N> {
|
||||||
|
let re = rng.gen();
|
||||||
|
let im = rng.gen();
|
||||||
|
RandComplex(Complex::new(re, im))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// This is a wrapper similar to RandComplex, but for non-complex.
|
||||||
|
// This exists only to make generic tests easier to write.
|
||||||
|
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
|
||||||
|
pub struct RandScalar<N>(pub N);
|
||||||
|
|
||||||
|
impl<N: Arbitrary> Arbitrary for RandScalar<N> {
|
||||||
|
#[inline]
|
||||||
|
fn arbitrary<G: Gen>(rng: &mut G) -> Self {
|
||||||
|
RandScalar(Arbitrary::arbitrary(rng))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<N: Real> Distribution<RandScalar<N>> for Standard
|
||||||
|
where
|
||||||
|
Standard: Distribution<N>,
|
||||||
|
{
|
||||||
|
#[inline]
|
||||||
|
fn sample<'a, G: Rng + ?Sized>(&self, rng: &'a mut G) -> RandScalar<N> {
|
||||||
|
RandScalar(self.sample(rng))
|
||||||
|
}
|
||||||
|
}
|
@ -1022,7 +1022,7 @@ mod finite_dim_inner_space_tests {
|
|||||||
*
|
*
|
||||||
*/
|
*/
|
||||||
#[cfg(feature = "arbitrary")]
|
#[cfg(feature = "arbitrary")]
|
||||||
fn is_subspace_basis<T: FiniteDimInnerSpace<Real = f64> + Display>(vs: &[T]) -> bool {
|
fn is_subspace_basis<T: FiniteDimInnerSpace<Real = f64, Complex = f64> + Display>(vs: &[T]) -> bool {
|
||||||
for i in 0..vs.len() {
|
for i in 0..vs.len() {
|
||||||
// Basis elements must be normalized.
|
// Basis elements must be normalized.
|
||||||
if !relative_eq!(vs[i].norm(), 1.0, epsilon = 1.0e-7) {
|
if !relative_eq!(vs[i].norm(), 1.0, epsilon = 1.0e-7) {
|
||||||
|
@ -8,3 +8,7 @@ mod matrix_slice;
|
|||||||
#[cfg(feature = "mint")]
|
#[cfg(feature = "mint")]
|
||||||
mod mint;
|
mod mint;
|
||||||
mod serde;
|
mod serde;
|
||||||
|
|
||||||
|
|
||||||
|
#[cfg(feature = "arbitrary")]
|
||||||
|
pub mod helper;
|
@ -82,7 +82,7 @@ quickcheck!(
|
|||||||
r: Rotation2<f64>,
|
r: Rotation2<f64>,
|
||||||
t: Translation2<f64>,
|
t: Translation2<f64>,
|
||||||
v: Vector2<f64>,
|
v: Vector2<f64>,
|
||||||
p: Point2<f64>,
|
p: Point2<f64>
|
||||||
) -> bool
|
) -> bool
|
||||||
{
|
{
|
||||||
// (rotation × translation) * point = rotation × (translation * point)
|
// (rotation × translation) * point = rotation × (translation * point)
|
||||||
@ -120,7 +120,7 @@ quickcheck!(
|
|||||||
r: Rotation3<f64>,
|
r: Rotation3<f64>,
|
||||||
t: Translation3<f64>,
|
t: Translation3<f64>,
|
||||||
v: Vector3<f64>,
|
v: Vector3<f64>,
|
||||||
p: Point3<f64>,
|
p: Point3<f64>
|
||||||
) -> bool
|
) -> bool
|
||||||
{
|
{
|
||||||
// (rotation × translation) * point = rotation × (translation * point)
|
// (rotation × translation) * point = rotation × (translation * point)
|
||||||
@ -158,7 +158,7 @@ quickcheck!(
|
|||||||
t: Translation3<f64>,
|
t: Translation3<f64>,
|
||||||
v: Vector3<f64>,
|
v: Vector3<f64>,
|
||||||
p: Point3<f64>,
|
p: Point3<f64>,
|
||||||
r: Rotation3<f64>,
|
r: Rotation3<f64>
|
||||||
) -> bool
|
) -> bool
|
||||||
{
|
{
|
||||||
let iMi = i * i;
|
let iMi = i * i;
|
||||||
|
@ -19,7 +19,7 @@ quickcheck!(
|
|||||||
fn inverse_is_parts_inversion(
|
fn inverse_is_parts_inversion(
|
||||||
t: Translation3<f64>,
|
t: Translation3<f64>,
|
||||||
r: UnitQuaternion<f64>,
|
r: UnitQuaternion<f64>,
|
||||||
scaling: f64,
|
scaling: f64
|
||||||
) -> bool
|
) -> bool
|
||||||
{
|
{
|
||||||
if relative_eq!(scaling, 0.0) {
|
if relative_eq!(scaling, 0.0) {
|
||||||
@ -33,7 +33,7 @@ quickcheck!(
|
|||||||
fn multiply_equals_alga_transform(
|
fn multiply_equals_alga_transform(
|
||||||
s: Similarity3<f64>,
|
s: Similarity3<f64>,
|
||||||
v: Vector3<f64>,
|
v: Vector3<f64>,
|
||||||
p: Point3<f64>,
|
p: Point3<f64>
|
||||||
) -> bool
|
) -> bool
|
||||||
{
|
{
|
||||||
s * v == s.transform_vector(&v)
|
s * v == s.transform_vector(&v)
|
||||||
@ -56,7 +56,7 @@ quickcheck!(
|
|||||||
t: Translation3<f64>,
|
t: Translation3<f64>,
|
||||||
v: Vector3<f64>,
|
v: Vector3<f64>,
|
||||||
p: Point3<f64>,
|
p: Point3<f64>,
|
||||||
scaling: f64,
|
scaling: f64
|
||||||
) -> bool
|
) -> bool
|
||||||
{
|
{
|
||||||
if relative_eq!(scaling, 0.0) {
|
if relative_eq!(scaling, 0.0) {
|
||||||
@ -152,7 +152,7 @@ quickcheck!(
|
|||||||
uq: UnitQuaternion<f64>,
|
uq: UnitQuaternion<f64>,
|
||||||
t: Translation3<f64>,
|
t: Translation3<f64>,
|
||||||
v: Vector3<f64>,
|
v: Vector3<f64>,
|
||||||
p: Point3<f64>,
|
p: Point3<f64>
|
||||||
) -> bool
|
) -> bool
|
||||||
{
|
{
|
||||||
let sMs = s * s;
|
let sMs = s * s;
|
||||||
|
@ -72,7 +72,7 @@ quickcheck!(
|
|||||||
uc: UnitComplex<f64>,
|
uc: UnitComplex<f64>,
|
||||||
v: Vector2<f64>,
|
v: Vector2<f64>,
|
||||||
p: Point2<f64>,
|
p: Point2<f64>,
|
||||||
r: Rotation2<f64>,
|
r: Rotation2<f64>
|
||||||
) -> bool
|
) -> bool
|
||||||
{
|
{
|
||||||
let uv = Unit::new_normalize(v);
|
let uv = Unit::new_normalize(v);
|
||||||
|
@ -12,9 +12,10 @@ extern crate num_traits as num;
|
|||||||
extern crate quickcheck;
|
extern crate quickcheck;
|
||||||
extern crate rand;
|
extern crate rand;
|
||||||
extern crate serde_json;
|
extern crate serde_json;
|
||||||
|
extern crate num_complex;
|
||||||
|
|
||||||
mod core;
|
mod core;
|
||||||
mod geometry;
|
mod geometry;
|
||||||
mod linalg;
|
mod linalg;
|
||||||
#[cfg(feature = "sparse")]
|
//#[cfg(feature = "sparse")]
|
||||||
mod sparse;
|
//mod sparse;
|
||||||
|
@ -1,9 +1,11 @@
|
|||||||
#![cfg(feature = "arbitrary")]
|
#![cfg(feature = "arbitrary")]
|
||||||
|
|
||||||
use na::{DMatrix, Matrix2, Matrix3x5, Matrix4, Matrix5x3};
|
use na::{DMatrix, Matrix2, Matrix3x5, Matrix4, Matrix5x3};
|
||||||
|
use core::helper::{RandScalar, RandComplex};
|
||||||
|
|
||||||
quickcheck! {
|
quickcheck! {
|
||||||
fn bidiagonal(m: DMatrix<f64>) -> bool {
|
fn bidiagonal(m: DMatrix<RandComplex<f64>>) -> bool {
|
||||||
|
let m = m.map(|e| e.0);
|
||||||
if m.len() == 0 {
|
if m.len() == 0 {
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
@ -17,7 +19,8 @@ quickcheck! {
|
|||||||
relative_eq!(m, &u * d * &v_t, epsilon = 1.0e-7)
|
relative_eq!(m, &u * d * &v_t, epsilon = 1.0e-7)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn bidiagonal_static_5_3(m: Matrix5x3<f64>) -> bool {
|
fn bidiagonal_static_5_3(m: Matrix5x3<RandComplex<f64>>) -> bool {
|
||||||
|
let m = m.map(|e| e.0);
|
||||||
let bidiagonal = m.bidiagonalize();
|
let bidiagonal = m.bidiagonalize();
|
||||||
let (u, d, v_t) = bidiagonal.unpack();
|
let (u, d, v_t) = bidiagonal.unpack();
|
||||||
|
|
||||||
@ -27,7 +30,8 @@ quickcheck! {
|
|||||||
relative_eq!(m, &u * d * &v_t, epsilon = 1.0e-7)
|
relative_eq!(m, &u * d * &v_t, epsilon = 1.0e-7)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn bidiagonal_static_3_5(m: Matrix3x5<f64>) -> bool {
|
fn bidiagonal_static_3_5(m: Matrix3x5<RandComplex<f64>>) -> bool {
|
||||||
|
let m = m.map(|e| e.0);
|
||||||
let bidiagonal = m.bidiagonalize();
|
let bidiagonal = m.bidiagonalize();
|
||||||
let (u, d, v_t) = bidiagonal.unpack();
|
let (u, d, v_t) = bidiagonal.unpack();
|
||||||
|
|
||||||
@ -37,7 +41,8 @@ quickcheck! {
|
|||||||
relative_eq!(m, &u * d * &v_t, epsilon = 1.0e-7)
|
relative_eq!(m, &u * d * &v_t, epsilon = 1.0e-7)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn bidiagonal_static_square(m: Matrix4<f64>) -> bool {
|
fn bidiagonal_static_square(m: Matrix4<RandComplex<f64>>) -> bool {
|
||||||
|
let m = m.map(|e| e.0);
|
||||||
let bidiagonal = m.bidiagonalize();
|
let bidiagonal = m.bidiagonalize();
|
||||||
let (u, d, v_t) = bidiagonal.unpack();
|
let (u, d, v_t) = bidiagonal.unpack();
|
||||||
|
|
||||||
@ -47,7 +52,8 @@ quickcheck! {
|
|||||||
relative_eq!(m, &u * d * &v_t, epsilon = 1.0e-7)
|
relative_eq!(m, &u * d * &v_t, epsilon = 1.0e-7)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn bidiagonal_static_square_2x2(m: Matrix2<f64>) -> bool {
|
fn bidiagonal_static_square_2x2(m: Matrix2<RandComplex<f64>>) -> bool {
|
||||||
|
let m = m.map(|e| e.0);
|
||||||
let bidiagonal = m.bidiagonalize();
|
let bidiagonal = m.bidiagonalize();
|
||||||
let (u, d, v_t) = bidiagonal.unpack();
|
let (u, d, v_t) = bidiagonal.unpack();
|
||||||
|
|
||||||
|
@ -5,12 +5,13 @@ use na::DMatrix;
|
|||||||
#[cfg(feature = "arbitrary")]
|
#[cfg(feature = "arbitrary")]
|
||||||
mod quickcheck_tests {
|
mod quickcheck_tests {
|
||||||
use na::{DMatrix, Matrix2, Matrix3, Matrix4};
|
use na::{DMatrix, Matrix2, Matrix3, Matrix4};
|
||||||
|
use core::helper::{RandScalar, RandComplex};
|
||||||
use std::cmp;
|
use std::cmp;
|
||||||
|
|
||||||
quickcheck! {
|
quickcheck! {
|
||||||
fn symmetric_eigen(n: usize) -> bool {
|
fn symmetric_eigen(n: usize) -> bool {
|
||||||
let n = cmp::max(1, cmp::min(n, 10));
|
let n = cmp::max(1, cmp::min(n, 10));
|
||||||
let m = DMatrix::<f64>::new_random(n, n);
|
let m = DMatrix::<RandComplex<f64>>::new_random(n, n).map(|e| e.0);
|
||||||
let eig = m.clone().symmetric_eigen();
|
let eig = m.clone().symmetric_eigen();
|
||||||
let recomp = eig.recompose();
|
let recomp = eig.recompose();
|
||||||
|
|
||||||
@ -21,9 +22,9 @@ mod quickcheck_tests {
|
|||||||
|
|
||||||
fn symmetric_eigen_singular(n: usize) -> bool {
|
fn symmetric_eigen_singular(n: usize) -> bool {
|
||||||
let n = cmp::max(1, cmp::min(n, 10));
|
let n = cmp::max(1, cmp::min(n, 10));
|
||||||
let mut m = DMatrix::<f64>::new_random(n, n);
|
let mut m = DMatrix::<RandComplex<f64>>::new_random(n, n).map(|e| e.0);
|
||||||
m.row_mut(n / 2).fill(0.0);
|
m.row_mut(n / 2).fill(na::zero());
|
||||||
m.column_mut(n / 2).fill(0.0);
|
m.column_mut(n / 2).fill(na::zero());
|
||||||
let eig = m.clone().symmetric_eigen();
|
let eig = m.clone().symmetric_eigen();
|
||||||
let recomp = eig.recompose();
|
let recomp = eig.recompose();
|
||||||
|
|
||||||
@ -32,7 +33,8 @@ mod quickcheck_tests {
|
|||||||
relative_eq!(m.lower_triangle(), recomp.lower_triangle(), epsilon = 1.0e-5)
|
relative_eq!(m.lower_triangle(), recomp.lower_triangle(), epsilon = 1.0e-5)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn symmetric_eigen_static_square_4x4(m: Matrix4<f64>) -> bool {
|
fn symmetric_eigen_static_square_4x4(m: Matrix4<RandComplex<f64>>) -> bool {
|
||||||
|
let m = m.map(|e| e.0);
|
||||||
let eig = m.symmetric_eigen();
|
let eig = m.symmetric_eigen();
|
||||||
let recomp = eig.recompose();
|
let recomp = eig.recompose();
|
||||||
|
|
||||||
@ -41,7 +43,8 @@ mod quickcheck_tests {
|
|||||||
relative_eq!(m.lower_triangle(), recomp.lower_triangle(), epsilon = 1.0e-5)
|
relative_eq!(m.lower_triangle(), recomp.lower_triangle(), epsilon = 1.0e-5)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn symmetric_eigen_static_square_3x3(m: Matrix3<f64>) -> bool {
|
fn symmetric_eigen_static_square_3x3(m: Matrix3<RandComplex<f64>>) -> bool {
|
||||||
|
let m = m.map(|e| e.0);
|
||||||
let eig = m.symmetric_eigen();
|
let eig = m.symmetric_eigen();
|
||||||
let recomp = eig.recompose();
|
let recomp = eig.recompose();
|
||||||
|
|
||||||
@ -50,7 +53,8 @@ mod quickcheck_tests {
|
|||||||
relative_eq!(m.lower_triangle(), recomp.lower_triangle(), epsilon = 1.0e-5)
|
relative_eq!(m.lower_triangle(), recomp.lower_triangle(), epsilon = 1.0e-5)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn symmetric_eigen_static_square_2x2(m: Matrix2<f64>) -> bool {
|
fn symmetric_eigen_static_square_2x2(m: Matrix2<RandComplex<f64>>) -> bool {
|
||||||
|
let m = m.map(|e| e.0);
|
||||||
let eig = m.symmetric_eigen();
|
let eig = m.symmetric_eigen();
|
||||||
let recomp = eig.recompose();
|
let recomp = eig.recompose();
|
||||||
|
|
||||||
|
@ -1,6 +1,7 @@
|
|||||||
#![cfg(feature = "arbitrary")]
|
#![cfg(feature = "arbitrary")]
|
||||||
|
|
||||||
use na::{DMatrix, Matrix2, Matrix4};
|
use na::{DMatrix, Matrix2, Matrix4};
|
||||||
|
use core::helper::{RandScalar, RandComplex};
|
||||||
use std::cmp;
|
use std::cmp;
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
@ -14,20 +15,22 @@ fn hessenberg_simple() {
|
|||||||
quickcheck! {
|
quickcheck! {
|
||||||
fn hessenberg(n: usize) -> bool {
|
fn hessenberg(n: usize) -> bool {
|
||||||
let n = cmp::max(1, cmp::min(n, 50));
|
let n = cmp::max(1, cmp::min(n, 50));
|
||||||
let m = DMatrix::<f64>::new_random(n, n);
|
let m = DMatrix::<RandComplex<f64>>::new_random(n, n).map(|e| e.0);
|
||||||
|
|
||||||
let hess = m.clone().hessenberg();
|
let hess = m.clone().hessenberg();
|
||||||
let (p, h) = hess.unpack();
|
let (p, h) = hess.unpack();
|
||||||
relative_eq!(m, &p * h * p.transpose(), epsilon = 1.0e-7)
|
relative_eq!(m, &p * h * p.transpose(), epsilon = 1.0e-7)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn hessenberg_static_mat2(m: Matrix2<f64>) -> bool {
|
fn hessenberg_static_mat2(m: Matrix2<RandComplex<f64>>) -> bool {
|
||||||
|
let m = m.map(|e| e.0);
|
||||||
let hess = m.hessenberg();
|
let hess = m.hessenberg();
|
||||||
let (p, h) = hess.unpack();
|
let (p, h) = hess.unpack();
|
||||||
relative_eq!(m, p * h * p.transpose(), epsilon = 1.0e-7)
|
relative_eq!(m, p * h * p.transpose(), epsilon = 1.0e-7)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn hessenberg_static(m: Matrix4<f64>) -> bool {
|
fn hessenberg_static(m: Matrix4<RandComplex<f64>>) -> bool {
|
||||||
|
let m = m.map(|e| e.0);
|
||||||
let hess = m.hessenberg();
|
let hess = m.hessenberg();
|
||||||
let (p, h) = hess.unpack();
|
let (p, h) = hess.unpack();
|
||||||
relative_eq!(m, p * h * p.transpose(), epsilon = 1.0e-7)
|
relative_eq!(m, p * h * p.transpose(), epsilon = 1.0e-7)
|
||||||
|
@ -40,16 +40,25 @@ fn lu_simple_with_pivot() {
|
|||||||
|
|
||||||
#[cfg(feature = "arbitrary")]
|
#[cfg(feature = "arbitrary")]
|
||||||
mod quickcheck_tests {
|
mod quickcheck_tests {
|
||||||
|
use core::helper::{RandScalar, RandComplex};
|
||||||
|
|
||||||
|
macro_rules! gen_tests(
|
||||||
|
($module: ident, $scalar: ty) => {
|
||||||
|
mod $module {
|
||||||
use std::cmp;
|
use std::cmp;
|
||||||
use na::{DMatrix, Matrix4, Matrix4x3, Matrix5x3, Matrix3x5, DVector, Vector4};
|
use na::{DMatrix, Matrix4, Matrix4x3, Matrix5x3, Matrix3x5, DVector, Vector4};
|
||||||
|
#[allow(unused_imports)]
|
||||||
|
use core::helper::{RandScalar, RandComplex};
|
||||||
|
|
||||||
quickcheck! {
|
quickcheck! {
|
||||||
fn lu(m: DMatrix<f64>) -> bool {
|
fn lu(m: DMatrix<$scalar>) -> bool {
|
||||||
let mut m = m;
|
let mut m = m;
|
||||||
if m.len() == 0 {
|
if m.len() == 0 {
|
||||||
m = DMatrix::new_random(1, 1);
|
m = DMatrix::<$scalar>::new_random(1, 1);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
let m = m.map(|e| e.0);
|
||||||
|
|
||||||
let lu = m.clone().lu();
|
let lu = m.clone().lu();
|
||||||
let (p, l, u) = lu.unpack();
|
let (p, l, u) = lu.unpack();
|
||||||
let mut lu = l * u;
|
let mut lu = l * u;
|
||||||
@ -58,7 +67,8 @@ mod quickcheck_tests {
|
|||||||
relative_eq!(m, lu, epsilon = 1.0e-7)
|
relative_eq!(m, lu, epsilon = 1.0e-7)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn lu_static_3_5(m: Matrix3x5<f64>) -> bool {
|
fn lu_static_3_5(m: Matrix3x5<$scalar>) -> bool {
|
||||||
|
let m = m.map(|e| e.0);
|
||||||
let lu = m.lu();
|
let lu = m.lu();
|
||||||
let (p, l, u) = lu.unpack();
|
let (p, l, u) = lu.unpack();
|
||||||
let mut lu = l * u;
|
let mut lu = l * u;
|
||||||
@ -67,7 +77,8 @@ mod quickcheck_tests {
|
|||||||
relative_eq!(m, lu, epsilon = 1.0e-7)
|
relative_eq!(m, lu, epsilon = 1.0e-7)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn lu_static_5_3(m: Matrix5x3<f64>) -> bool {
|
fn lu_static_5_3(m: Matrix5x3<$scalar>) -> bool {
|
||||||
|
let m = m.map(|e| e.0);
|
||||||
let lu = m.lu();
|
let lu = m.lu();
|
||||||
let (p, l, u) = lu.unpack();
|
let (p, l, u) = lu.unpack();
|
||||||
let mut lu = l * u;
|
let mut lu = l * u;
|
||||||
@ -76,7 +87,8 @@ mod quickcheck_tests {
|
|||||||
relative_eq!(m, lu, epsilon = 1.0e-7)
|
relative_eq!(m, lu, epsilon = 1.0e-7)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn lu_static_square(m: Matrix4<f64>) -> bool {
|
fn lu_static_square(m: Matrix4<$scalar>) -> bool {
|
||||||
|
let m = m.map(|e| e.0);
|
||||||
let lu = m.lu();
|
let lu = m.lu();
|
||||||
let (p, l, u) = lu.unpack();
|
let (p, l, u) = lu.unpack();
|
||||||
let mut lu = l * u;
|
let mut lu = l * u;
|
||||||
@ -89,11 +101,11 @@ mod quickcheck_tests {
|
|||||||
if n != 0 && nb != 0 {
|
if n != 0 && nb != 0 {
|
||||||
let n = cmp::min(n, 50); // To avoid slowing down the test too much.
|
let n = cmp::min(n, 50); // To avoid slowing down the test too much.
|
||||||
let nb = cmp::min(nb, 50); // To avoid slowing down the test too much.
|
let nb = cmp::min(nb, 50); // To avoid slowing down the test too much.
|
||||||
let m = DMatrix::<f64>::new_random(n, n);
|
let m = DMatrix::<$scalar>::new_random(n, n).map(|e| e.0);
|
||||||
|
|
||||||
let lu = m.clone().lu();
|
let lu = m.clone().lu();
|
||||||
let b1 = DVector::new_random(n);
|
let b1 = DVector::<$scalar>::new_random(n).map(|e| e.0);
|
||||||
let b2 = DMatrix::new_random(n, nb);
|
let b2 = DMatrix::<$scalar>::new_random(n, nb).map(|e| e.0);
|
||||||
|
|
||||||
let sol1 = lu.solve(&b1);
|
let sol1 = lu.solve(&b1);
|
||||||
let sol2 = lu.solve(&b2);
|
let sol2 = lu.solve(&b2);
|
||||||
@ -105,10 +117,11 @@ mod quickcheck_tests {
|
|||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
fn lu_solve_static(m: Matrix4<f64>) -> bool {
|
fn lu_solve_static(m: Matrix4<$scalar>) -> bool {
|
||||||
|
let m = m.map(|e| e.0);
|
||||||
let lu = m.lu();
|
let lu = m.lu();
|
||||||
let b1 = Vector4::new_random();
|
let b1 = Vector4::<$scalar>::new_random().map(|e| e.0);
|
||||||
let b2 = Matrix4x3::new_random();
|
let b2 = Matrix4x3::<$scalar>::new_random().map(|e| e.0);
|
||||||
|
|
||||||
let sol1 = lu.solve(&b1);
|
let sol1 = lu.solve(&b1);
|
||||||
let sol2 = lu.solve(&b2);
|
let sol2 = lu.solve(&b2);
|
||||||
@ -119,14 +132,14 @@ mod quickcheck_tests {
|
|||||||
|
|
||||||
fn lu_inverse(n: usize) -> bool {
|
fn lu_inverse(n: usize) -> bool {
|
||||||
let n = cmp::max(1, cmp::min(n, 15)); // To avoid slowing down the test too much.
|
let n = cmp::max(1, cmp::min(n, 15)); // To avoid slowing down the test too much.
|
||||||
let m = DMatrix::<f64>::new_random(n, n);
|
let m = DMatrix::<$scalar>::new_random(n, n).map(|e| e.0);
|
||||||
|
|
||||||
let mut l = m.lower_triangle();
|
let mut l = m.lower_triangle();
|
||||||
let mut u = m.upper_triangle();
|
let mut u = m.upper_triangle();
|
||||||
|
|
||||||
// Ensure the matrix is well conditioned for inversion.
|
// Ensure the matrix is well conditioned for inversion.
|
||||||
l.fill_diagonal(1.0);
|
l.fill_diagonal(na::one());
|
||||||
u.fill_diagonal(1.0);
|
u.fill_diagonal(na::one());
|
||||||
let m = l * u;
|
let m = l * u;
|
||||||
|
|
||||||
let m1 = m.clone().lu().try_inverse().unwrap();
|
let m1 = m.clone().lu().try_inverse().unwrap();
|
||||||
@ -136,7 +149,8 @@ mod quickcheck_tests {
|
|||||||
return id1.is_identity(1.0e-5) && id2.is_identity(1.0e-5);
|
return id1.is_identity(1.0e-5) && id2.is_identity(1.0e-5);
|
||||||
}
|
}
|
||||||
|
|
||||||
fn lu_inverse_static(m: Matrix4<f64>) -> bool {
|
fn lu_inverse_static(m: Matrix4<$scalar>) -> bool {
|
||||||
|
let m = m.map(|e| e.0);
|
||||||
let lu = m.lu();
|
let lu = m.lu();
|
||||||
|
|
||||||
if let Some(m1) = lu.try_inverse() {
|
if let Some(m1) = lu.try_inverse() {
|
||||||
@ -150,4 +164,10 @@ mod quickcheck_tests {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
);
|
||||||
|
|
||||||
|
gen_tests!(complex, RandComplex<f64>);
|
||||||
|
gen_tests!(f64, RandScalar<f64>);
|
||||||
}
|
}
|
||||||
|
@ -1,19 +1,30 @@
|
|||||||
#![cfg(feature = "arbitrary")]
|
#![cfg(feature = "arbitrary")]
|
||||||
|
|
||||||
use na::{DMatrix, DVector, Matrix3x5, Matrix4, Matrix4x3, Matrix5x3, Vector4};
|
use core::helper::{RandScalar, RandComplex};
|
||||||
use std::cmp;
|
|
||||||
|
|
||||||
quickcheck! {
|
macro_rules! gen_tests(
|
||||||
fn qr(m: DMatrix<f64>) -> bool {
|
($module: ident, $scalar: ty) => {
|
||||||
|
mod $module {
|
||||||
|
use na::{DMatrix, DVector, Matrix3x5, Matrix4, Matrix4x3, Matrix5x3, Vector4};
|
||||||
|
use std::cmp;
|
||||||
|
use core::helper::{RandScalar, RandComplex};
|
||||||
|
|
||||||
|
quickcheck! {
|
||||||
|
fn qr(m: DMatrix<$scalar>) -> bool {
|
||||||
|
let m = m.map(|e| e.0);
|
||||||
let qr = m.clone().qr();
|
let qr = m.clone().qr();
|
||||||
let q = qr.q();
|
let q = qr.q();
|
||||||
let r = qr.r();
|
let r = qr.r();
|
||||||
|
|
||||||
|
println!("m: {}", m);
|
||||||
|
println!("qr: {}", &q * &r);
|
||||||
|
|
||||||
relative_eq!(m, &q * r, epsilon = 1.0e-7) &&
|
relative_eq!(m, &q * r, epsilon = 1.0e-7) &&
|
||||||
q.is_orthogonal(1.0e-7)
|
q.is_orthogonal(1.0e-7)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn qr_static_5_3(m: Matrix5x3<f64>) -> bool {
|
fn qr_static_5_3(m: Matrix5x3<$scalar>) -> bool {
|
||||||
|
let m = m.map(|e| e.0);
|
||||||
let qr = m.qr();
|
let qr = m.qr();
|
||||||
let q = qr.q();
|
let q = qr.q();
|
||||||
let r = qr.r();
|
let r = qr.r();
|
||||||
@ -22,7 +33,8 @@ quickcheck! {
|
|||||||
q.is_orthogonal(1.0e-7)
|
q.is_orthogonal(1.0e-7)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn qr_static_3_5(m: Matrix3x5<f64>) -> bool {
|
fn qr_static_3_5(m: Matrix3x5<$scalar>) -> bool {
|
||||||
|
let m = m.map(|e| e.0);
|
||||||
let qr = m.qr();
|
let qr = m.qr();
|
||||||
let q = qr.q();
|
let q = qr.q();
|
||||||
let r = qr.r();
|
let r = qr.r();
|
||||||
@ -31,7 +43,8 @@ quickcheck! {
|
|||||||
q.is_orthogonal(1.0e-7)
|
q.is_orthogonal(1.0e-7)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn qr_static_square(m: Matrix4<f64>) -> bool {
|
fn qr_static_square(m: Matrix4<$scalar>) -> bool {
|
||||||
|
let m = m.map(|e| e.0);
|
||||||
let qr = m.qr();
|
let qr = m.qr();
|
||||||
let q = qr.q();
|
let q = qr.q();
|
||||||
let r = qr.r();
|
let r = qr.r();
|
||||||
@@ -46,11 +59,11 @@ quickcheck! {
if n != 0 && nb != 0 {
let n = cmp::min(n, 50); // To avoid slowing down the test too much.
let nb = cmp::min(nb, 50); // To avoid slowing down the test too much.
-let m = DMatrix::<f64>::new_random(n, n);
+let m = DMatrix::<$scalar>::new_random(n, n).map(|e| e.0);

let qr = m.clone().qr();
-let b1 = DVector::new_random(n);
-let b2 = DMatrix::new_random(n, nb);
+let b1 = DVector::<$scalar>::new_random(n).map(|e| e.0);
+let b2 = DMatrix::<$scalar>::new_random(n, nb).map(|e| e.0);

if qr.is_invertible() {
let sol1 = qr.solve(&b1).unwrap();
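The qr_solve property above (and qr_solve_static below) uses the factorization to solve linear systems and only proceeds when the factor is invertible. A compact standalone version of the same check for f64, again an illustrative sketch rather than the test code:

    use nalgebra::{DMatrix, DVector};

    fn main() {
        let n = 10;
        let a = DMatrix::<f64>::new_random(n, n);
        let b = DVector::<f64>::new_random(n);
        let qr = a.clone().qr();

        // solve() returns None when the triangular factor is (numerically) singular.
        if let Some(x) = qr.solve(&b) {
            let residual = (&a * &x - &b).norm();
            assert!(residual <= 1.0e-7 * (1.0 + b.norm()));
        }
    }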
@@ -64,10 +77,11 @@ quickcheck! {
return true;
}

-fn qr_solve_static(m: Matrix4<f64>) -> bool {
+fn qr_solve_static(m: Matrix4<$scalar>) -> bool {
+let m = m.map(|e| e.0);
let qr = m.qr();
-let b1 = Vector4::new_random();
-let b2 = Matrix4x3::new_random();
+let b1 = Vector4::<$scalar>::new_random().map(|e| e.0);
+let b2 = Matrix4x3::<$scalar>::new_random().map(|e| e.0);

if qr.is_invertible() {
let sol1 = qr.solve(&b1).unwrap();
@@ -83,7 +97,7 @@ quickcheck! {

fn qr_inverse(n: usize) -> bool {
let n = cmp::max(1, cmp::min(n, 15)); // To avoid slowing down the test too much.
-let m = DMatrix::<f64>::new_random(n, n);
+let m = DMatrix::<$scalar>::new_random(n, n).map(|e| e.0);

if let Some(m1) = m.clone().qr().try_inverse() {
let id1 = &m * &m1;
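qr_inverse builds the inverse through the QR factorization and compares the products with the original matrix against the identity. The same check in isolation, sketched for a fixed-size f64 matrix (tolerance chosen here for illustration):

    use nalgebra::Matrix4;

    fn main() {
        let m = Matrix4::<f64>::new_random();

        // try_inverse() returns None for (numerically) singular inputs.
        if let Some(m1) = m.qr().try_inverse() {
            let id1 = &m * &m1;
            let id2 = &m1 * &m;
            assert!(id1.is_identity(1.0e-5) && id2.is_identity(1.0e-5));
        }
    }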
@@ -96,7 +110,8 @@ quickcheck! {
}
}

-fn qr_inverse_static(m: Matrix4<f64>) -> bool {
+fn qr_inverse_static(m: Matrix4<$scalar>) -> bool {
+let m = m.map(|e| e.0);
let qr = m.qr();

if let Some(m1) = qr.try_inverse() {
@@ -109,4 +124,10 @@ quickcheck! {
true
}
}
}
+}
+}
+);
+
+gen_tests!(complex, RandComplex<f64>);
+gen_tests!(f64, RandScalar<f64>);
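Both instantiations depend on the RandScalar and RandComplex wrappers from core::helper, whose definition is not part of this diff. For orientation only, here is a hypothetical sketch of what such wrappers need to provide so that .map(|e| e.0) recovers the scalar and quickcheck can generate them; every name and derive below is an assumption, not the actual helper:

    use num_complex::Complex;
    use quickcheck::{Arbitrary, Gen};
    use rand::Rng;

    // Hypothetical stand-ins for core::helper::{RandScalar, RandComplex}.
    #[derive(Copy, Clone, Debug, PartialEq)]
    pub struct RandScalar<N>(pub N);

    #[derive(Copy, Clone, Debug, PartialEq)]
    pub struct RandComplex<N>(pub Complex<N>);

    impl Arbitrary for RandScalar<f64> {
        fn arbitrary<G: Gen>(g: &mut G) -> Self {
            // A plain random f64; the real helper may restrict the range.
            RandScalar(g.gen())
        }
    }

    impl Arbitrary for RandComplex<f64> {
        fn arbitrary<G: Gen>(g: &mut G) -> Self {
            // Independent random real and imaginary parts.
            RandComplex(Complex::new(g.gen(), g.gen()))
        }
    }

With the crate's "arbitrary" feature enabled, nalgebra then provides Arbitrary for matrices over any Arbitrary scalar, which is what lets quickcheck generate values such as Matrix4<RandComplex<f64>> directly.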
@@ -3,12 +3,24 @@
use std::cmp;

use na::{DMatrix, Matrix2, Matrix4};
+use core::helper::{RandScalar, RandComplex};

quickcheck! {
-fn symm_tridiagonal(n: usize) -> bool {
-let n = cmp::max(1, cmp::min(n, 50));
-let m = DMatrix::<f64>::new_random(n, n);
-let tri = m.clone().symmetric_tridiagonalize();
+// fn symm_tridiagonal(n: usize) -> bool {
+// let n = cmp::max(1, cmp::min(n, 50));
+// let m = DMatrix::<RandComplex<f64>>::new_random(n, n).map(|e| e.0).hermitian_part();
+// let tri = m.clone().symmetric_tridiagonalize();
+// let recomp = tri.recompose();
+//
+// println!("{}{}", m.lower_triangle(), recomp.lower_triangle());
+//
+// relative_eq!(m.lower_triangle(), recomp.lower_triangle(), epsilon = 1.0e-7)
+// }
+
+fn symm_tridiagonal_static_square(m: Matrix4<RandComplex<f64>>) -> bool {
+let m = m.map(|e| e.0).hermitian_part();
+let tri = m.symmetric_tridiagonalize();
+println!("Internal tri: {}{}", tri.internal_tri(), tri.off_diagonal());
let recomp = tri.recompose();

println!("{}{}", m.lower_triangle(), recomp.lower_triangle());
@@ -16,20 +28,11 @@ quickcheck! {
relative_eq!(m.lower_triangle(), recomp.lower_triangle(), epsilon = 1.0e-7)
}

-fn symm_tridiagonal_static_square(m: Matrix4<f64>) -> bool {
-let tri = m.symmetric_tridiagonalize();
-println!("{}{}", tri.internal_tri(), tri.off_diagonal());
-let recomp = tri.recompose();
-
-println!("{}{}", m.lower_triangle(), recomp.lower_triangle());
-
-relative_eq!(m.lower_triangle(), recomp.lower_triangle(), epsilon = 1.0e-7)
-}
-
-fn symm_tridiagonal_static_square_2x2(m: Matrix2<f64>) -> bool {
-let tri = m.symmetric_tridiagonalize();
-let recomp = tri.recompose();
-
-relative_eq!(m.lower_triangle(), recomp.lower_triangle(), epsilon = 1.0e-7)
-}
+// fn symm_tridiagonal_static_square_2x2(m: Matrix2<RandComplex<f64>>) -> bool {
+// let m = m.map(|e| e.0).hermitian_part();
+// let tri = m.symmetric_tridiagonalize();
+// let recomp = tri.recompose();
+//
+// relative_eq!(m.lower_triangle(), recomp.lower_triangle(), epsilon = 1.0e-7)
+// }
}
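The surviving test asserts that recompose() reproduces the Hermitian input of symmetric_tridiagonalize(), comparing lower triangles only (the strictly upper part follows by symmetry). The same property in a standalone f64 sketch, symmetrizing the input by hand instead of using this branch's hermitian_part() helper (illustrative, not the test code):

    use nalgebra::Matrix4;

    fn main() {
        let a = Matrix4::<f64>::new_random();
        // Symmetrize so the tridiagonalization applies.
        let m = 0.5 * (a + a.transpose());

        let tri = m.symmetric_tridiagonalize();
        let recomp = tri.recompose();

        // Mirrors the quickcheck property: only the lower triangles are compared.
        assert!((m.lower_triangle() - recomp.lower_triangle()).norm() <= 1.0e-7);
    }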