Implement some BLAS operations involving adjoint.

sebcrozet 2019-03-23 11:48:12 +01:00
parent 1001e8ee0f
commit 921a05d523
20 changed files with 617 additions and 465 deletions

View File

@ -214,7 +214,7 @@ where
} }
} }
/// Solves the linear system `self.conjugate_transpose() * x = b`, where `x` is the unknown to /// Solves the linear system `self.adjoint() * x = b`, where `x` is the unknown to
/// be determined. /// be determined.
pub fn solve_conjugate_transpose<R2: Dim, C2: Dim, S2>( pub fn solve_conjugate_transpose<R2: Dim, C2: Dim, S2>(
&self, &self,
@ -249,7 +249,7 @@ where
self.generic_solve_mut(b'T', b) self.generic_solve_mut(b'T', b)
} }
/// Solves in-place the linear system `self.conjugate_transpose() * x = b`, where `x` is the unknown to /// Solves in-place the linear system `self.adjoint() * x = b`, where `x` is the unknown to
/// be determined. /// be determined.
/// ///
/// Returns `false` if no solution was found (the decomposed matrix is singular). /// Returns `false` if no solution was found (the decomposed matrix is singular).
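Note: as a conceptual check (not the LAPACK-backed routine above), solving the adjoint system `self.adjoint() * x = b` is equivalent to factorizing the explicitly formed adjoint. A minimal sketch with plain nalgebra, assuming real entries so the adjoint reduces to the transpose:

```rust
use nalgebra::{Matrix2, Vector2};

let a = Matrix2::new(2.0, 1.0,
                     0.0, 3.0);
let b = Vector2::new(4.0, 6.0);

// Explicitly form the adjoint and solve with an LU decomposition.
let x = a.adjoint().lu().solve(&b).unwrap();

// x solves the adjoint system: a.adjoint() * x == b.
assert!((a.adjoint() * x - b).norm() < 1.0e-12);
```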

View File

@ -11,7 +11,7 @@ use base::constraint::{
}; };
use base::dimension::{Dim, Dynamic, U1, U2, U3, U4}; use base::dimension::{Dim, Dynamic, U1, U2, U3, U4};
use base::storage::{Storage, StorageMut}; use base::storage::{Storage, StorageMut};
use base::{DefaultAllocator, Matrix, Scalar, SquareMatrix, Vector}; use base::{DefaultAllocator, Matrix, Scalar, SquareMatrix, Vector, DVectorSlice};
// FIXME: find a way to avoid code duplication just for complex number support. // FIXME: find a way to avoid code duplication just for complex number support.
@ -32,11 +32,11 @@ impl<N: Complex, D: Dim, S: Storage<N, D>> Vector<N, D, S> {
pub fn icamax(&self) -> usize { pub fn icamax(&self) -> usize {
assert!(!self.is_empty(), "The input vector must not be empty."); assert!(!self.is_empty(), "The input vector must not be empty.");
let mut the_max = unsafe { self.vget_unchecked(0).asum() }; let mut the_max = unsafe { self.vget_unchecked(0).norm1() };
let mut the_i = 0; let mut the_i = 0;
for i in 1..self.nrows() { for i in 1..self.nrows() {
let val = unsafe { self.vget_unchecked(i).asum() }; let val = unsafe { self.vget_unchecked(i).norm1() };
if val > the_max { if val > the_max {
the_max = val; the_max = val;
@ -211,12 +211,12 @@ impl<N: Complex, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
pub fn icamax_full(&self) -> (usize, usize) { pub fn icamax_full(&self) -> (usize, usize) {
assert!(!self.is_empty(), "The input matrix must not be empty."); assert!(!self.is_empty(), "The input matrix must not be empty.");
let mut the_max = unsafe { self.get_unchecked((0, 0)).asum() }; let mut the_max = unsafe { self.get_unchecked((0, 0)).norm1() };
let mut the_ij = (0, 0); let mut the_ij = (0, 0);
for j in 0..self.ncols() { for j in 0..self.ncols() {
for i in 0..self.nrows() { for i in 0..self.nrows() {
let val = unsafe { self.get_unchecked((i, j)).asum() }; let val = unsafe { self.get_unchecked((i, j)).norm1() };
if val > the_max { if val > the_max {
the_max = val; the_max = val;
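Note: `icamax` compares the 1-norms (`|re| + |im|`) of the components. A small hedged example, assuming `num_complex::Complex<f64>` implements the `Complex` scalar trait used by this commit:

```rust
use num_complex::Complex;
use nalgebra::Vector3;

let v = Vector3::new(
    Complex::new(1.0, 2.0),  // 1-norm = 3.0
    Complex::new(-4.0, 1.0), // 1-norm = 5.0  <- largest
    Complex::new(0.5, 0.5),  // 1-norm = 1.0
);
assert_eq!(v.icamax(), 1);
```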
@ -263,13 +263,11 @@ impl<N: Scalar + PartialOrd + Signed, R: Dim, C: Dim, S: Storage<N, R, C>> Matri
} }
} }
impl<N: Complex, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> { impl<N, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S>
/// The dot product between two complex or real vectors or matrices (seen as vectors). where N: Scalar + Zero + ClosedAdd + ClosedMul
/// {
/// This is the same as `.dot` except that the conjugate of each component of `self` is taken #[inline(always)]
/// before performing the products. fn dotx<R2: Dim, C2: Dim, SB>(&self, rhs: &Matrix<N, R2, C2, SB>, conjugate: impl Fn(N) -> N) -> N
#[inline]
pub fn cdot<R2: Dim, C2: Dim, SB>(&self, rhs: &Matrix<N, R2, C2, SB>) -> N
where where
SB: Storage<N, R2, C2>, SB: Storage<N, R2, C2>,
ShapeConstraint: DimEq<R, R2> + DimEq<C, C2>, ShapeConstraint: DimEq<R, R2> + DimEq<C, C2>,
@ -283,27 +281,27 @@ impl<N: Complex, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
// because the `for` loop below won't be very efficient on those. // because the `for` loop below won't be very efficient on those.
if (R::is::<U2>() || R2::is::<U2>()) && (C::is::<U1>() || C2::is::<U1>()) { if (R::is::<U2>() || R2::is::<U2>()) && (C::is::<U1>() || C2::is::<U1>()) {
unsafe { unsafe {
let a = self.get_unchecked((0, 0)).conjugate() * *rhs.get_unchecked((0, 0)); let a = conjugate(*self.get_unchecked((0, 0))) * *rhs.get_unchecked((0, 0));
let b = self.get_unchecked((1, 0)).conjugate() * *rhs.get_unchecked((1, 0)); let b = conjugate(*self.get_unchecked((1, 0))) * *rhs.get_unchecked((1, 0));
return a + b; return a + b;
} }
} }
if (R::is::<U3>() || R2::is::<U3>()) && (C::is::<U1>() || C2::is::<U1>()) { if (R::is::<U3>() || R2::is::<U3>()) && (C::is::<U1>() || C2::is::<U1>()) {
unsafe { unsafe {
let a = self.get_unchecked((0, 0)).conjugate() * *rhs.get_unchecked((0, 0)); let a = conjugate(*self.get_unchecked((0, 0))) * *rhs.get_unchecked((0, 0));
let b = self.get_unchecked((1, 0)).conjugate() * *rhs.get_unchecked((1, 0)); let b = conjugate(*self.get_unchecked((1, 0))) * *rhs.get_unchecked((1, 0));
let c = self.get_unchecked((2, 0)).conjugate() * *rhs.get_unchecked((2, 0)); let c = conjugate(*self.get_unchecked((2, 0))) * *rhs.get_unchecked((2, 0));
return a + b + c; return a + b + c;
} }
} }
if (R::is::<U4>() || R2::is::<U4>()) && (C::is::<U1>() || C2::is::<U1>()) { if (R::is::<U4>() || R2::is::<U4>()) && (C::is::<U1>() || C2::is::<U1>()) {
unsafe { unsafe {
let mut a = self.get_unchecked((0, 0)).conjugate() * *rhs.get_unchecked((0, 0)); let mut a = conjugate(*self.get_unchecked((0, 0))) * *rhs.get_unchecked((0, 0));
let mut b = self.get_unchecked((1, 0)).conjugate() * *rhs.get_unchecked((1, 0)); let mut b = conjugate(*self.get_unchecked((1, 0))) * *rhs.get_unchecked((1, 0));
let c = self.get_unchecked((2, 0)).conjugate() * *rhs.get_unchecked((2, 0)); let c = conjugate(*self.get_unchecked((2, 0))) * *rhs.get_unchecked((2, 0));
let d = self.get_unchecked((3, 0)).conjugate() * *rhs.get_unchecked((3, 0)); let d = conjugate(*self.get_unchecked((3, 0))) * *rhs.get_unchecked((3, 0));
a += c; a += c;
b += d; b += d;
@ -343,14 +341,14 @@ impl<N: Complex, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
acc7 = N::zero(); acc7 = N::zero();
while self.nrows() - i >= 8 { while self.nrows() - i >= 8 {
acc0 += unsafe { self.get_unchecked((i + 0, j)).conjugate() * *rhs.get_unchecked((i + 0, j)) }; acc0 += unsafe { conjugate(*self.get_unchecked((i + 0, j))) * *rhs.get_unchecked((i + 0, j)) };
acc1 += unsafe { self.get_unchecked((i + 1, j)).conjugate() * *rhs.get_unchecked((i + 1, j)) }; acc1 += unsafe { conjugate(*self.get_unchecked((i + 1, j))) * *rhs.get_unchecked((i + 1, j)) };
acc2 += unsafe { self.get_unchecked((i + 2, j)).conjugate() * *rhs.get_unchecked((i + 2, j)) }; acc2 += unsafe { conjugate(*self.get_unchecked((i + 2, j))) * *rhs.get_unchecked((i + 2, j)) };
acc3 += unsafe { self.get_unchecked((i + 3, j)).conjugate() * *rhs.get_unchecked((i + 3, j)) }; acc3 += unsafe { conjugate(*self.get_unchecked((i + 3, j))) * *rhs.get_unchecked((i + 3, j)) };
acc4 += unsafe { self.get_unchecked((i + 4, j)).conjugate() * *rhs.get_unchecked((i + 4, j)) }; acc4 += unsafe { conjugate(*self.get_unchecked((i + 4, j))) * *rhs.get_unchecked((i + 4, j)) };
acc5 += unsafe { self.get_unchecked((i + 5, j)).conjugate() * *rhs.get_unchecked((i + 5, j)) }; acc5 += unsafe { conjugate(*self.get_unchecked((i + 5, j))) * *rhs.get_unchecked((i + 5, j)) };
acc6 += unsafe { self.get_unchecked((i + 6, j)).conjugate() * *rhs.get_unchecked((i + 6, j)) }; acc6 += unsafe { conjugate(*self.get_unchecked((i + 6, j))) * *rhs.get_unchecked((i + 6, j)) };
acc7 += unsafe { self.get_unchecked((i + 7, j)).conjugate() * *rhs.get_unchecked((i + 7, j)) }; acc7 += unsafe { conjugate(*self.get_unchecked((i + 7, j))) * *rhs.get_unchecked((i + 7, j)) };
i += 8; i += 8;
} }
@ -360,17 +358,14 @@ impl<N: Complex, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
res += acc3 + acc7; res += acc3 + acc7;
for k in i..self.nrows() { for k in i..self.nrows() {
res += unsafe { self.get_unchecked((k, j)).conjugate() * *rhs.get_unchecked((k, j)) } res += unsafe { conjugate(*self.get_unchecked((k, j))) * *rhs.get_unchecked((k, j)) }
} }
} }
res res
} }
}
impl<N, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S>
where N: Scalar + Zero + ClosedAdd + ClosedMul
{
/// The dot product between two vectors or matrices (seen as vectors). /// The dot product between two vectors or matrices (seen as vectors).
/// ///
/// Note that this is **not** the matrix multiplication as in, e.g., numpy. For matrix /// Note that this is **not** the matrix multiplication as in, e.g., numpy. For matrix
@ -396,97 +391,36 @@ where N: Scalar + Zero + ClosedAdd + ClosedMul
SB: Storage<N, R2, C2>, SB: Storage<N, R2, C2>,
ShapeConstraint: DimEq<R, R2> + DimEq<C, C2>, ShapeConstraint: DimEq<R, R2> + DimEq<C, C2>,
{ {
assert!( self.dotx(rhs, |e| e)
self.nrows() == rhs.nrows(),
"Dot product dimensions mismatch."
);
// So we do some special cases for common fixed-size vectors of dimension lower than 8
// because the `for` loop below won't be very efficient on those.
if (R::is::<U2>() || R2::is::<U2>()) && (C::is::<U1>() || C2::is::<U1>()) {
unsafe {
let a = *self.get_unchecked((0, 0)) * *rhs.get_unchecked((0, 0));
let b = *self.get_unchecked((1, 0)) * *rhs.get_unchecked((1, 0));
return a + b;
}
}
if (R::is::<U3>() || R2::is::<U3>()) && (C::is::<U1>() || C2::is::<U1>()) {
unsafe {
let a = *self.get_unchecked((0, 0)) * *rhs.get_unchecked((0, 0));
let b = *self.get_unchecked((1, 0)) * *rhs.get_unchecked((1, 0));
let c = *self.get_unchecked((2, 0)) * *rhs.get_unchecked((2, 0));
return a + b + c;
}
}
if (R::is::<U4>() || R2::is::<U4>()) && (C::is::<U1>() || C2::is::<U1>()) {
unsafe {
let mut a = *self.get_unchecked((0, 0)) * *rhs.get_unchecked((0, 0));
let mut b = *self.get_unchecked((1, 0)) * *rhs.get_unchecked((1, 0));
let c = *self.get_unchecked((2, 0)) * *rhs.get_unchecked((2, 0));
let d = *self.get_unchecked((3, 0)) * *rhs.get_unchecked((3, 0));
a += c;
b += d;
return a + b;
}
} }
// All this is inspired from the "unrolled version" discussed in: /// The dot product between two vectors or matrices (seen as vectors).
// http://blog.theincredibleholk.org/blog/2012/12/10/optimizing-dot-product/ ///
// /// Note that this is **not** the matrix multiplication as in, e.g., numpy. For matrix
// And this comment from bluss: /// multiplication, use one of: `.gemm`, `.mul_to`, `.mul`, the `*` operator.
// https://users.rust-lang.org/t/how-to-zip-two-slices-efficiently/2048/12 ///
let mut res = N::zero(); /// # Examples:
///
// We have to define them outside of the loop (and not inside at first assignment) /// ```
// otherwise vectorization won't kick in for some reason. /// # use nalgebra::{Vector3, Matrix2x3};
let mut acc0; /// let vec1 = Vector3::new(1.0, 2.0, 3.0);
let mut acc1; /// let vec2 = Vector3::new(0.1, 0.2, 0.3);
let mut acc2; /// assert_eq!(vec1.dot(&vec2), 1.4);
let mut acc3; ///
let mut acc4; /// let mat1 = Matrix2x3::new(1.0, 2.0, 3.0,
let mut acc5; /// 4.0, 5.0, 6.0);
let mut acc6; /// let mat2 = Matrix2x3::new(0.1, 0.2, 0.3,
let mut acc7; /// 0.4, 0.5, 0.6);
/// assert_eq!(mat1.dot(&mat2), 9.1);
for j in 0..self.ncols() { /// ```
let mut i = 0; #[inline]
pub fn dotc<R2: Dim, C2: Dim, SB>(&self, rhs: &Matrix<N, R2, C2, SB>) -> N
acc0 = N::zero(); where
acc1 = N::zero(); N: Complex,
acc2 = N::zero(); SB: Storage<N, R2, C2>,
acc3 = N::zero(); ShapeConstraint: DimEq<R, R2> + DimEq<C, C2>,
acc4 = N::zero(); {
acc5 = N::zero(); self.dotx(rhs, Complex::conjugate)
acc6 = N::zero();
acc7 = N::zero();
while self.nrows() - i >= 8 {
acc0 += unsafe { *self.get_unchecked((i + 0, j)) * *rhs.get_unchecked((i + 0, j)) };
acc1 += unsafe { *self.get_unchecked((i + 1, j)) * *rhs.get_unchecked((i + 1, j)) };
acc2 += unsafe { *self.get_unchecked((i + 2, j)) * *rhs.get_unchecked((i + 2, j)) };
acc3 += unsafe { *self.get_unchecked((i + 3, j)) * *rhs.get_unchecked((i + 3, j)) };
acc4 += unsafe { *self.get_unchecked((i + 4, j)) * *rhs.get_unchecked((i + 4, j)) };
acc5 += unsafe { *self.get_unchecked((i + 5, j)) * *rhs.get_unchecked((i + 5, j)) };
acc6 += unsafe { *self.get_unchecked((i + 6, j)) * *rhs.get_unchecked((i + 6, j)) };
acc7 += unsafe { *self.get_unchecked((i + 7, j)) * *rhs.get_unchecked((i + 7, j)) };
i += 8;
}
res += acc0 + acc4;
res += acc1 + acc5;
res += acc2 + acc6;
res += acc3 + acc7;
for k in i..self.nrows() {
res += unsafe { *self.get_unchecked((k, j)) * *rhs.get_unchecked((k, j)) }
}
}
res
} }
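Note: a minimal hedged sketch of `dotc` on complex vectors, assuming `num_complex::Complex<f64>` as the scalar. The components of `self` are conjugated before the products are summed:

```rust
use num_complex::Complex;
use nalgebra::Vector2;

let a = Vector2::new(Complex::new(1.0, 2.0), Complex::new(3.0, -1.0));
let b = Vector2::new(Complex::new(0.5, 0.5), Complex::new(2.0, 0.0));

// dotc(a, b) = conj(a[0]) * b[0] + conj(a[1]) * b[1]
let expected = a[0].conj() * b[0] + a[1].conj() * b[1];
assert_eq!(a.dotc(&b), expected);
```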
/// The dot product between the transpose of `self` and `rhs`. /// The dot product between the transpose of `self` and `rhs`.
@ -643,40 +577,15 @@ where
} }
} }
/// Computes `self = alpha * a * x + beta * self`, where `a` is a **symmetric** matrix, `x` a
/// vector, and `alpha, beta` two scalars. #[inline(always)]
/// fn xgemv<D2: Dim, D3: Dim, SB, SC>(
/// If `beta` is zero, `self` is never read. If `self` is read, only its lower-triangular part
/// (including the diagonal) is actually read.
///
/// # Examples:
///
/// ```
/// # use nalgebra::{Matrix2, Vector2};
/// let mat = Matrix2::new(1.0, 2.0,
/// 2.0, 4.0);
/// let mut vec1 = Vector2::new(1.0, 2.0);
/// let vec2 = Vector2::new(0.1, 0.2);
/// vec1.gemv_symm(10.0, &mat, &vec2, 5.0);
/// assert_eq!(vec1, Vector2::new(10.0, 20.0));
///
///
/// // The matrix upper-triangular elements can be garbage because it is never
/// // read by this method. Therefore, it is not necessary for the caller to
/// // fill the matrix struct upper-triangle.
/// let mat = Matrix2::new(1.0, 9999999.9999999,
/// 2.0, 4.0);
/// let mut vec1 = Vector2::new(1.0, 2.0);
/// vec1.gemv_symm(10.0, &mat, &vec2, 5.0);
/// assert_eq!(vec1, Vector2::new(10.0, 20.0));
/// ```
#[inline]
pub fn gemv_symm<D2: Dim, D3: Dim, SB, SC>(
&mut self, &mut self,
alpha: N, alpha: N,
a: &SquareMatrix<N, D2, SB>, a: &SquareMatrix<N, D2, SB>,
x: &Vector<N, D3, SC>, x: &Vector<N, D3, SC>,
beta: N, beta: N,
dotc: impl Fn(&DVectorSlice<N, SB::RStride, SB::CStride>, &DVectorSlice<N, SC::RStride, SC::CStride>) -> N,
) where ) where
N: One, N: One,
SB: Storage<N, D2, D2>, SB: Storage<N, D2, D2>,
@ -687,83 +596,6 @@ where
let dim2 = a.nrows(); let dim2 = a.nrows();
let dim3 = x.nrows(); let dim3 = x.nrows();
assert!(
a.is_square(),
"Symmetric gemv: the input matrix must be square."
);
assert!(
dim2 == dim3 && dim1 == dim2,
"Symmetric gemv: dimensions mismatch."
);
if dim2 == 0 {
return;
}
// FIXME: avoid bound checks.
let col2 = a.column(0);
let val = unsafe { *x.vget_unchecked(0) };
self.axpy(alpha * val, &col2, beta);
self[0] += alpha * x.rows_range(1..).dot(&a.slice_range(1.., 0));
for j in 1..dim2 {
let col2 = a.column(j);
let dot = x.rows_range(j..).dot(&col2.rows_range(j..));
let val;
unsafe {
val = *x.vget_unchecked(j);
*self.vget_unchecked_mut(j) += alpha * dot;
}
self.rows_range_mut(j + 1..)
.axpy(alpha * val, &col2.rows_range(j + 1..), N::one());
}
}
/// Computes `self = alpha * a * x + beta * self`, where `a` is a **symmetric** matrix, `x` a
/// vector, and `alpha, beta` two scalars.
///
/// If `beta` is zero, `self` is never read. If `self` is read, only its lower-triangular part
/// (including the diagonal) is actually read.
///
/// # Examples:
///
/// ```
/// # use nalgebra::{Matrix2, Vector2};
/// let mat = Matrix2::new(1.0, 2.0,
/// 2.0, 4.0);
/// let mut vec1 = Vector2::new(1.0, 2.0);
/// let vec2 = Vector2::new(0.1, 0.2);
/// vec1.gemv_symm(10.0, &mat, &vec2, 5.0);
/// assert_eq!(vec1, Vector2::new(10.0, 20.0));
///
///
/// // The matrix upper-triangular elements can be garbage because it is never
/// // read by this method. Therefore, it is not necessary for the caller to
/// // fill the matrix struct upper-triangle.
/// let mat = Matrix2::new(1.0, 9999999.9999999,
/// 2.0, 4.0);
/// let mut vec1 = Vector2::new(1.0, 2.0);
/// vec1.gemv_symm(10.0, &mat, &vec2, 5.0);
/// assert_eq!(vec1, Vector2::new(10.0, 20.0));
/// ```
#[inline]
pub fn cgemv_symm<D2: Dim, D3: Dim, SB, SC>(
&mut self,
alpha: N,
a: &SquareMatrix<N, D2, SB>,
x: &Vector<N, D3, SC>,
beta: N,
) where
N: Complex,
SB: Storage<N, D2, D2>,
SC: Storage<N, D3>,
ShapeConstraint: DimEq<D, D2> + AreMultipliable<D2, D2, D3, U1>,
{
let dim1 = self.nrows();
let dim2 = a.nrows();
let dim3 = x.nrows();
assert!( assert!(
a.is_square(), a.is_square(),
"Symmetric cgemv: the input matrix must be square." "Symmetric cgemv: the input matrix must be square."
@ -781,11 +613,11 @@ where
let col2 = a.column(0); let col2 = a.column(0);
let val = unsafe { *x.vget_unchecked(0) }; let val = unsafe { *x.vget_unchecked(0) };
self.axpy(alpha * val, &col2, beta); self.axpy(alpha * val, &col2, beta);
self[0] += alpha * a.slice_range(1.., 0).cdot(&x.rows_range(1..)); self[0] += alpha * dotc(&a.slice_range(1.., 0), &x.rows_range(1..));
for j in 1..dim2 { for j in 1..dim2 {
let col2 = a.column(j); let col2 = a.column(j);
let dot = col2.rows_range(j..).cdot(&x.rows_range(j..)); let dot = dotc(&col2.rows_range(j..), &x.rows_range(j..));
let val; let val;
unsafe { unsafe {
@ -797,6 +629,111 @@ where
} }
} }
/// Computes `self = alpha * a * x + beta * self`, where `a` is a **symmetric** matrix, `x` a
/// vector, and `alpha, beta` two scalars. DEPRECATED: use `sygemv` instead.
#[inline]
#[deprecated(note = "This is renamed `sygemv` to match the original BLAS terminology.")]
pub fn gemv_symm<D2: Dim, D3: Dim, SB, SC>(
&mut self,
alpha: N,
a: &SquareMatrix<N, D2, SB>,
x: &Vector<N, D3, SC>,
beta: N,
) where
N: One,
SB: Storage<N, D2, D2>,
SC: Storage<N, D3>,
ShapeConstraint: DimEq<D, D2> + AreMultipliable<D2, D2, D3, U1>,
{
self.sygemv(alpha, a, x, beta)
}
/// Computes `self = alpha * a * x + beta * self`, where `a` is a **symmetric** matrix, `x` a
/// vector, and `alpha, beta` two scalars.
///
/// If `beta` is zero, `self` is never read. If `self` is read, only its lower-triangular part
/// (including the diagonal) is actually read.
///
/// # Examples:
///
/// ```
/// # use nalgebra::{Matrix2, Vector2};
/// let mat = Matrix2::new(1.0, 2.0,
/// 2.0, 4.0);
/// let mut vec1 = Vector2::new(1.0, 2.0);
/// let vec2 = Vector2::new(0.1, 0.2);
/// vec1.sygemv(10.0, &mat, &vec2, 5.0);
/// assert_eq!(vec1, Vector2::new(10.0, 20.0));
///
///
/// // The matrix upper-triangular elements can be garbage because it is never
/// // read by this method. Therefore, it is not necessary for the caller to
/// // fill the matrix struct upper-triangle.
/// let mat = Matrix2::new(1.0, 9999999.9999999,
/// 2.0, 4.0);
/// let mut vec1 = Vector2::new(1.0, 2.0);
/// vec1.sygemv(10.0, &mat, &vec2, 5.0);
/// assert_eq!(vec1, Vector2::new(10.0, 20.0));
/// ```
#[inline]
pub fn sygemv<D2: Dim, D3: Dim, SB, SC>(
&mut self,
alpha: N,
a: &SquareMatrix<N, D2, SB>,
x: &Vector<N, D3, SC>,
beta: N,
) where
N: One,
SB: Storage<N, D2, D2>,
SC: Storage<N, D3>,
ShapeConstraint: DimEq<D, D2> + AreMultipliable<D2, D2, D3, U1>,
{
self.xgemv(alpha, a, x, beta, |a, b| a.dot(b))
}
/// Computes `self = alpha * a * x + beta * self`, where `a` is an **hermitian** matrix, `x` a
/// vector, and `alpha, beta` two scalars.
///
/// If `beta` is zero, `self` is never read. If `self` is read, only its lower-triangular part
/// (including the diagonal) is actually read.
///
/// # Examples:
///
/// ```
/// # use nalgebra::{Matrix2, Vector2};
/// let mat = Matrix2::new(1.0, 2.0,
/// 2.0, 4.0);
/// let mut vec1 = Vector2::new(1.0, 2.0);
/// let vec2 = Vector2::new(0.1, 0.2);
/// vec1.hegemv(10.0, &mat, &vec2, 5.0);
/// assert_eq!(vec1, Vector2::new(10.0, 20.0));
///
///
/// // The matrix upper-triangular elements can be garbage because it is never
/// // read by this method. Therefore, it is not necessary for the caller to
/// // fill the matrix struct upper-triangle.
/// let mat = Matrix2::new(1.0, 9999999.9999999,
/// 2.0, 4.0);
/// let mut vec1 = Vector2::new(1.0, 2.0);
/// vec1.hegemv(10.0, &mat, &vec2, 5.0);
/// assert_eq!(vec1, Vector2::new(10.0, 20.0));
/// ```
#[inline]
pub fn hegemv<D2: Dim, D3: Dim, SB, SC>(
&mut self,
alpha: N,
a: &SquareMatrix<N, D2, SB>,
x: &Vector<N, D3, SC>,
beta: N,
) where
N: Complex,
SB: Storage<N, D2, D2>,
SC: Storage<N, D3>,
ShapeConstraint: DimEq<D, D2> + AreMultipliable<D2, D2, D3, U1>,
{
self.xgemv(alpha, a, x, beta, |a, b| a.dotc(b))
}
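Note: the doc example above uses a real matrix, for which `hegemv` and `sygemv` coincide. A hedged complex sketch of the hermitian semantics, assuming `num_complex::Complex<f64>` as the scalar: only the lower triangle is read, and the implicit upper triangle is its conjugate.

```rust
use num_complex::Complex;
use nalgebra::{Matrix2, Vector2};

// Hermitian matrix stored through its lower triangle:
// the value written in the strict upper triangle is never read.
let mat = Matrix2::new(Complex::new(2.0, 0.0), Complex::new(9999.0, 9999.0),
                       Complex::new(0.0, 1.0), Complex::new(4.0, 0.0));
let x = Vector2::new(Complex::new(1.0, 0.0), Complex::new(0.0, 1.0));
let mut y = Vector2::new(Complex::new(1.0, 0.0), Complex::new(1.0, 0.0));

// y = 1 * (A * x) + 0 * y, where A = [[2, -i], [i, 4]] is the hermitian matrix
// defined by the stored lower triangle.
y.hegemv(Complex::new(1.0, 0.0), &mat, &x, Complex::new(0.0, 0.0));
assert_eq!(y, Vector2::new(Complex::new(3.0, 0.0), Complex::new(0.0, 5.0)));
```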
/// Computes `self = alpha * a.transpose() * x + beta * self`, where `a` is a matrix, `x` a vector, and /// Computes `self = alpha * a.transpose() * x + beta * self`, where `a` is a matrix, `x` a vector, and
/// `alpha, beta` two scalars. /// `alpha, beta` two scalars.
/// ///
@ -855,33 +792,17 @@ where
} }
} }
// FIXME: duplicate code
impl<N, R1: Dim, C1: Dim, S: StorageMut<N, R1, C1>> Matrix<N, R1, C1, S> impl<N, R1: Dim, C1: Dim, S: StorageMut<N, R1, C1>> Matrix<N, R1, C1, S>
where N: Complex + Zero + ClosedAdd + ClosedMul where N: Scalar + Zero + ClosedAdd + ClosedMul
{ {
/// Computes `self = alpha * x * y.transpose() + beta * self`. #[inline(always)]
/// fn gerx<D2: Dim, D3: Dim, SB, SC>(
/// If `beta` is zero, `self` is never read.
///
/// # Examples:
///
/// ```
/// # use nalgebra::{Matrix2x3, Vector2, Vector3};
/// let mut mat = Matrix2x3::repeat(4.0);
/// let vec1 = Vector2::new(1.0, 2.0);
/// let vec2 = Vector3::new(0.1, 0.2, 0.3);
/// let expected = vec1 * vec2.transpose() * 10.0 + mat * 5.0;
///
/// mat.ger(10.0, &vec1, &vec2, 5.0);
/// assert_eq!(mat, expected);
/// ```
#[inline]
pub fn gerc<D2: Dim, D3: Dim, SB, SC>(
&mut self, &mut self,
alpha: N, alpha: N,
x: &Vector<N, D2, SB>, x: &Vector<N, D2, SB>,
y: &Vector<N, D3, SC>, y: &Vector<N, D3, SC>,
beta: N, beta: N,
conjugate: impl Fn(N) -> N,
) where ) where
N: One, N: One,
SB: Storage<N, D2>, SB: Storage<N, D2>,
@ -899,15 +820,11 @@ impl<N, R1: Dim, C1: Dim, S: StorageMut<N, R1, C1>> Matrix<N, R1, C1, S>
for j in 0..ncols1 { for j in 0..ncols1 {
// FIXME: avoid bound checks. // FIXME: avoid bound checks.
let val = unsafe { y.vget_unchecked(j).conjugate() }; let val = unsafe { conjugate(*y.vget_unchecked(j)) };
self.column_mut(j).axpy(alpha * val, x, beta); self.column_mut(j).axpy(alpha * val, x, beta);
} }
} }
}
impl<N, R1: Dim, C1: Dim, S: StorageMut<N, R1, C1>> Matrix<N, R1, C1, S>
where N: Scalar + Zero + ClosedAdd + ClosedMul
{
/// Computes `self = alpha * x * y.transpose() + beta * self`. /// Computes `self = alpha * x * y.transpose() + beta * self`.
/// ///
/// If `beta` is zero, `self` is never read. /// If `beta` is zero, `self` is never read.
@ -937,20 +854,39 @@ where N: Scalar + Zero + ClosedAdd + ClosedMul
SC: Storage<N, D3>, SC: Storage<N, D3>,
ShapeConstraint: DimEq<R1, D2> + DimEq<C1, D3>, ShapeConstraint: DimEq<R1, D2> + DimEq<C1, D3>,
{ {
let (nrows1, ncols1) = self.shape(); self.gerx(alpha, x, y, beta, |e| e)
let dim2 = x.nrows();
let dim3 = y.nrows();
assert!(
nrows1 == dim2 && ncols1 == dim3,
"ger: dimensions mismatch."
);
for j in 0..ncols1 {
// FIXME: avoid bound checks.
let val = unsafe { *y.vget_unchecked(j) };
self.column_mut(j).axpy(alpha * val, x, beta);
} }
/// Computes `self = alpha * x * y.transpose() + beta * self`.
///
/// If `beta` is zero, `self` is never read.
///
/// # Examples:
///
/// ```
/// # use nalgebra::{Matrix2x3, Vector2, Vector3};
/// let mut mat = Matrix2x3::repeat(4.0);
/// let vec1 = Vector2::new(1.0, 2.0);
/// let vec2 = Vector3::new(0.1, 0.2, 0.3);
/// let expected = vec1 * vec2.transpose() * 10.0 + mat * 5.0;
///
/// mat.ger(10.0, &vec1, &vec2, 5.0);
/// assert_eq!(mat, expected);
/// ```
#[inline]
pub fn gerc<D2: Dim, D3: Dim, SB, SC>(
&mut self,
alpha: N,
x: &Vector<N, D2, SB>,
y: &Vector<N, D3, SC>,
beta: N,
) where
N: Complex,
SB: Storage<N, D2>,
SC: Storage<N, D3>,
ShapeConstraint: DimEq<R1, D2> + DimEq<C1, D3>,
{
self.gerx(alpha, x, y, beta, Complex::conjugate)
} }
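Note: a hedged sketch of the conjugating rank-one update `gerc`, assuming `num_complex::Complex<f64>` as the scalar. It computes `self = alpha * x * y.adjoint() + beta * self`, i.e. the components of `y` are conjugated:

```rust
use num_complex::Complex;
use nalgebra::{Matrix2, Vector2};

let mut mat = Matrix2::zeros();
let x = Vector2::new(Complex::new(1.0, 1.0), Complex::new(2.0, 0.0));
let y = Vector2::new(Complex::new(0.0, 1.0), Complex::new(3.0, 0.0));

// mat = 1 * x * y.adjoint() + 0 * mat
mat.gerc(Complex::new(1.0, 0.0), &x, &y, Complex::new(0.0, 0.0));
assert_eq!(mat, &x * y.adjoint());
```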
/// Computes `self = alpha * a * b + beta * self`, where `a, b, self` are matrices. /// Computes `self = alpha * a * b + beta * self`, where `a, b, self` are matrices.
@ -1146,6 +1082,42 @@ where N: Scalar + Zero + ClosedAdd + ClosedMul
impl<N, R1: Dim, C1: Dim, S: StorageMut<N, R1, C1>> Matrix<N, R1, C1, S> impl<N, R1: Dim, C1: Dim, S: StorageMut<N, R1, C1>> Matrix<N, R1, C1, S>
where N: Scalar + Zero + ClosedAdd + ClosedMul where N: Scalar + Zero + ClosedAdd + ClosedMul
{ {
#[inline(always)]
fn sygerx<D2: Dim, D3: Dim, SB, SC>(
&mut self,
alpha: N,
x: &Vector<N, D2, SB>,
y: &Vector<N, D3, SC>,
beta: N,
conjugate: impl Fn(N) -> N,
) where
N: One,
SB: Storage<N, D2>,
SC: Storage<N, D3>,
ShapeConstraint: DimEq<R1, D2> + DimEq<C1, D3>,
{
let dim1 = self.nrows();
let dim2 = x.nrows();
let dim3 = y.nrows();
assert!(
self.is_square(),
"Symmetric ger: the input matrix must be square."
);
assert!(dim1 == dim2 && dim1 == dim3, "ger: dimensions mismatch.");
for j in 0..dim1 {
let val = unsafe { conjugate(*y.vget_unchecked(j)) };
let subdim = Dynamic::new(dim1 - j);
// FIXME: avoid bound checks.
self.generic_slice_mut((j, j), (subdim, U1)).axpy(
alpha * val,
&x.rows_range(j..),
beta,
);
}
}
/// Computes `self = alpha * x * y.transpose() + beta * self`, where `self` is a **symmetric** /// Computes `self = alpha * x * y.transpose() + beta * self`, where `self` is a **symmetric**
/// matrix. /// matrix.
/// ///
@ -1166,6 +1138,7 @@ where N: Scalar + Zero + ClosedAdd + ClosedMul
/// assert_eq!(mat.lower_triangle(), expected.lower_triangle()); /// assert_eq!(mat.lower_triangle(), expected.lower_triangle());
/// assert_eq!(mat.m12, 99999.99999); // This was untouched. /// assert_eq!(mat.m12, 99999.99999); // This was untouched.
#[inline] #[inline]
#[deprecated(note = "This is renamed `syger` to match the original BLAS terminology.")]
pub fn ger_symm<D2: Dim, D3: Dim, SB, SC>( pub fn ger_symm<D2: Dim, D3: Dim, SB, SC>(
&mut self, &mut self,
alpha: N, alpha: N,
@ -1178,26 +1151,77 @@ where N: Scalar + Zero + ClosedAdd + ClosedMul
SC: Storage<N, D3>, SC: Storage<N, D3>,
ShapeConstraint: DimEq<R1, D2> + DimEq<C1, D3>, ShapeConstraint: DimEq<R1, D2> + DimEq<C1, D3>,
{ {
let dim1 = self.nrows(); self.syger(alpha, x, y, beta)
let dim2 = x.nrows();
let dim3 = y.nrows();
assert!(
self.is_square(),
"Symmetric ger: the input matrix must be square."
);
assert!(dim1 == dim2 && dim1 == dim3, "ger: dimensions mismatch.");
for j in 0..dim1 {
let val = unsafe { *y.vget_unchecked(j) };
let subdim = Dynamic::new(dim1 - j);
// FIXME: avoid bound checks.
self.generic_slice_mut((j, j), (subdim, U1)).axpy(
alpha * val,
&x.rows_range(j..),
beta,
);
} }
/// Computes `self = alpha * x * y.transpose() + beta * self`, where `self` is a **symmetric**
/// matrix.
///
/// If `beta` is zero, `self` is never read. The result is symmetric. Only the lower-triangular
/// (including the diagonal) part of `self` is read/written.
///
/// # Examples:
///
/// ```
/// # use nalgebra::{Matrix2, Vector2};
/// let mut mat = Matrix2::identity();
/// let vec1 = Vector2::new(1.0, 2.0);
/// let vec2 = Vector2::new(0.1, 0.2);
/// let expected = vec1 * vec2.transpose() * 10.0 + mat * 5.0;
/// mat.m12 = 99999.99999; // This component is on the upper-triangular part and will not be read/written.
///
/// mat.ger_symm(10.0, &vec1, &vec2, 5.0);
/// assert_eq!(mat.lower_triangle(), expected.lower_triangle());
/// assert_eq!(mat.m12, 99999.99999); // This was untouched.
#[inline]
pub fn syger<D2: Dim, D3: Dim, SB, SC>(
&mut self,
alpha: N,
x: &Vector<N, D2, SB>,
y: &Vector<N, D3, SC>,
beta: N,
) where
N: One,
SB: Storage<N, D2>,
SC: Storage<N, D3>,
ShapeConstraint: DimEq<R1, D2> + DimEq<C1, D3>,
{
self.sygerx(alpha, x, y, beta, |e| e)
}
/// Computes `self = alpha * x * y.adjoint() + beta * self`, where `self` is an **hermitian**
/// matrix.
///
/// If `beta` is zero, `self` is never read. The result is hermitian. Only the lower-triangular
/// (including the diagonal) part of `self` is read/written.
///
/// # Examples:
///
/// ```
/// # use nalgebra::{Matrix2, Vector2};
/// let mut mat = Matrix2::identity();
/// let vec1 = Vector2::new(1.0, 2.0);
/// let vec2 = Vector2::new(0.1, 0.2);
/// let expected = vec1 * vec2.transpose() * 10.0 + mat * 5.0;
/// mat.m12 = 99999.99999; // This component is on the upper-triangular part and will not be read/written.
///
/// mat.hegerc(10.0, &vec1, &vec2, 5.0);
/// assert_eq!(mat.lower_triangle(), expected.lower_triangle());
/// assert_eq!(mat.m12, 99999.99999); // This was untouched.
#[inline]
pub fn hegerc<D2: Dim, D3: Dim, SB, SC>(
&mut self,
alpha: N,
x: &Vector<N, D2, SB>,
y: &Vector<N, D3, SC>,
beta: N,
) where
N: Complex,
SB: Storage<N, D2>,
SC: Storage<N, D3>,
ShapeConstraint: DimEq<R1, D2> + DimEq<C1, D3>,
{
self.sygerx(alpha, x, y, beta, Complex::conjugate)
} }
} }
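Note: a hedged complex sketch of `hegerc`, assuming `num_complex::Complex<f64>` as the scalar. Only the lower triangle of `self` receives the hermitian rank-one update `alpha * x * y.adjoint() + beta * self`:

```rust
use num_complex::Complex;
use nalgebra::{Matrix2, Vector2};

let mut mat = Matrix2::zeros();
let x = Vector2::new(Complex::new(1.0, 1.0), Complex::new(2.0, 0.0));
let y = Vector2::new(Complex::new(0.0, 1.0), Complex::new(3.0, 0.0));

// Only the lower triangle of `mat` receives `1 * x * y.adjoint() + 0 * mat`.
mat.hegerc(Complex::new(1.0, 0.0), &x, &y, Complex::new(0.0, 0.0));
let expected = &x * y.adjoint();
assert_eq!(mat.lower_triangle(), expected.lower_triangle());
assert_eq!(mat.m12, Complex::new(0.0, 0.0)); // The upper triangle was never written.
```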

View File

@ -914,9 +914,9 @@ impl<N: Scalar, D: Dim, S: StorageMut<N, D, D>> Matrix<N, D, D, S> {
} }
impl<N: Complex, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> { impl<N: Complex, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Takes the conjugate and transposes `self` and store the result into `out`. /// Takes the adjoint (aka. conjugate-transpose) of `self` and store the result into `out`.
#[inline] #[inline]
pub fn conjugate_transpose_to<R2, C2, SB>(&self, out: &mut Matrix<N, R2, C2, SB>) pub fn adjoint_to<R2, C2, SB>(&self, out: &mut Matrix<N, R2, C2, SB>)
where where
R2: Dim, R2: Dim,
C2: Dim, C2: Dim,
@ -939,20 +939,41 @@ impl<N: Complex, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
} }
} }
/// The conjugate transposition of `self`. /// The adjoint (aka. conjugate-transpose) of `self`.
#[inline] #[inline]
pub fn conjugate_transpose(&self) -> MatrixMN<N, C, R> pub fn adjoint(&self) -> MatrixMN<N, C, R>
where DefaultAllocator: Allocator<N, C, R> { where DefaultAllocator: Allocator<N, C, R> {
let (nrows, ncols) = self.data.shape(); let (nrows, ncols) = self.data.shape();
unsafe { unsafe {
let mut res: MatrixMN<_, C, R> = Matrix::new_uninitialized_generic(ncols, nrows); let mut res: MatrixMN<_, C, R> = Matrix::new_uninitialized_generic(ncols, nrows);
self.conjugate_transpose_to(&mut res); self.adjoint_to(&mut res);
res res
} }
} }
/// Takes the conjugate and transposes `self` and store the result into `out`.
#[deprecated(note = "Renamed `self.adjoint_to(out)`.")]
#[inline]
pub fn conjugate_transpose_to<R2, C2, SB>(&self, out: &mut Matrix<N, R2, C2, SB>)
where
R2: Dim,
C2: Dim,
SB: StorageMut<N, R2, C2>,
ShapeConstraint: SameNumberOfRows<R, C2> + SameNumberOfColumns<C, R2>,
{
self.adjoint_to(out)
}
/// The conjugate transposition of `self`.
#[deprecated(note = "Renamed `self.adjoint()`.")]
#[inline]
pub fn conjugate_transpose(&self) -> MatrixMN<N, C, R>
where DefaultAllocator: Allocator<N, C, R> {
self.adjoint()
}
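Note: for reference, a minimal hedged sketch of what `adjoint` computes, assuming `num_complex::Complex<f64>` entries: a transpose plus an entrywise conjugation.

```rust
use num_complex::Complex;
use nalgebra::Matrix2;

let m = Matrix2::new(Complex::new(1.0, 2.0), Complex::new(3.0, 4.0),
                     Complex::new(5.0, 6.0), Complex::new(7.0, 8.0));

// adjoint = conjugate + transpose
assert_eq!(m.adjoint(), m.conjugate().transpose());
assert_eq!(m.adjoint()[(0, 1)], Complex::new(5.0, -6.0));
```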
/// The conjugate of `self`. /// The conjugate of `self`.
#[inline] #[inline]
pub fn conjugate(&self) -> MatrixMN<N, R, C> pub fn conjugate(&self) -> MatrixMN<N, R, C>
@ -1088,13 +1109,13 @@ impl<N: Complex, D: Dim, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
tr tr
} }
/// The hermitian part of `self`, i.e., `0.5 * (self + self.conjugate_transpose())`. /// The hermitian part of `self`, i.e., `0.5 * (self + self.adjoint())`.
#[inline] #[inline]
pub fn hermitian_part(&self) -> MatrixMN<N, D, D> pub fn hermitian_part(&self) -> MatrixMN<N, D, D>
where DefaultAllocator: Allocator<N, D, D> { where DefaultAllocator: Allocator<N, D, D> {
assert!(self.is_square(), "Cannot compute the hermitian part of a non-square matrix."); assert!(self.is_square(), "Cannot compute the hermitian part of a non-square matrix.");
let mut tr = self.conjugate_transpose(); let mut tr = self.adjoint();
tr += self; tr += self;
tr *= ::convert::<_, N>(0.5); tr *= ::convert::<_, N>(0.5);
tr tr
@ -1522,7 +1543,7 @@ impl<N: Complex, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
SB: Storage<N, R2, C2>, SB: Storage<N, R2, C2>,
ShapeConstraint: DimEq<R, R2> + DimEq<C, C2>, ShapeConstraint: DimEq<R, R2> + DimEq<C, C2>,
{ {
let prod = self.cdot(other); let prod = self.dotc(other);
let n1 = self.norm(); let n1 = self.norm();
let n2 = other.norm(); let n2 = other.norm();
@ -1593,7 +1614,7 @@ impl<N: Complex, D: Dim, S: Storage<N, D>> Unit<Vector<N, D, S>> {
where where
DefaultAllocator: Allocator<N, D>, DefaultAllocator: Allocator<N, D>,
{ {
let c_hang = self.cdot(rhs).real(); let c_hang = self.dotc(rhs).real();
// self == other // self == other
if c_hang.abs() >= N::Real::one() { if c_hang.abs() >= N::Real::one() {

View File

@ -192,7 +192,7 @@ where DefaultAllocator: Allocator<N, R, C>
#[inline] #[inline]
fn inner_product(&self, other: &Self) -> N { fn inner_product(&self, other: &Self) -> N {
self.cdot(other) self.dotc(other)
} }
} }

View File

@ -33,7 +33,7 @@ impl<N: Complex> Norm<N> for EuclideanNorm {
#[inline] #[inline]
fn norm<R, C, S>(&self, m: &Matrix<N, R, C, S>) -> N::Real fn norm<R, C, S>(&self, m: &Matrix<N, R, C, S>) -> N::Real
where R: Dim, C: Dim, S: Storage<N, R, C> { where R: Dim, C: Dim, S: Storage<N, R, C> {
m.cdot(m).real().sqrt() m.dotc(m).real().sqrt()
} }
#[inline] #[inline]
@ -101,7 +101,7 @@ impl<N: Complex, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
for i in 0..self.ncols() { for i in 0..self.ncols() {
let col = self.column(i); let col = self.column(i);
res += col.cdot(&col).real() res += col.dotc(&col).real()
} }
res res

View File

@ -13,7 +13,7 @@ use base::constraint::{
}; };
use base::dimension::{Dim, DimMul, DimName, DimProd}; use base::dimension::{Dim, DimMul, DimName, DimProd};
use base::storage::{ContiguousStorageMut, Storage, StorageMut}; use base::storage::{ContiguousStorageMut, Storage, StorageMut};
use base::{DefaultAllocator, Matrix, MatrixMN, MatrixN, MatrixSum, Scalar}; use base::{DefaultAllocator, Matrix, MatrixMN, MatrixN, MatrixSum, Scalar, VectorSliceN};
/* /*
* *
@ -629,13 +629,28 @@ where
res res
} }
/// Equivalent to `self.transpose() * rhs` but stores the result into `out` to avoid /// Equivalent to `self.adjoint() * rhs`.
/// allocations.
#[inline] #[inline]
pub fn tr_mul_to<R2: Dim, C2: Dim, SB, R3: Dim, C3: Dim, SC>( pub fn ad_mul<R2: Dim, C2: Dim, SB>(&self, rhs: &Matrix<N, R2, C2, SB>) -> MatrixMN<N, C1, C2>
where
N: Complex,
SB: Storage<N, R2, C2>,
DefaultAllocator: Allocator<N, C1, C2>,
ShapeConstraint: SameNumberOfRows<R1, R2>,
{
let mut res =
unsafe { Matrix::new_uninitialized_generic(self.data.shape().1, rhs.data.shape().1) };
self.ad_mul_to(rhs, &mut res);
res
}
#[inline(always)]
fn xx_mul_to<R2: Dim, C2: Dim, SB, R3: Dim, C3: Dim, SC>(
&self, &self,
rhs: &Matrix<N, R2, C2, SB>, rhs: &Matrix<N, R2, C2, SB>,
out: &mut Matrix<N, R3, C3, SC>, out: &mut Matrix<N, R3, C3, SC>,
dot: impl Fn(&VectorSliceN<N, R1, SA::RStride, SA::CStride>, &VectorSliceN<N, R2, SB::RStride, SB::CStride>) -> N,
) where ) where
SB: Storage<N, R2, C2>, SB: Storage<N, R2, C2>,
SC: StorageMut<N, R3, C3>, SC: StorageMut<N, R3, C3>,
@ -656,12 +671,43 @@ where
for i in 0..ncols1 { for i in 0..ncols1 {
for j in 0..ncols2 { for j in 0..ncols2 {
let dot = self.column(i).dot(&rhs.column(j)); let dot = dot(&self.column(i), &rhs.column(j));
unsafe { *out.get_unchecked_mut((i, j)) = dot }; unsafe { *out.get_unchecked_mut((i, j)) = dot };
} }
} }
} }
/// Equivalent to `self.transpose() * rhs` but stores the result into `out` to avoid
/// allocations.
#[inline]
pub fn tr_mul_to<R2: Dim, C2: Dim, SB, R3: Dim, C3: Dim, SC>(
&self,
rhs: &Matrix<N, R2, C2, SB>,
out: &mut Matrix<N, R3, C3, SC>,
) where
SB: Storage<N, R2, C2>,
SC: StorageMut<N, R3, C3>,
ShapeConstraint: SameNumberOfRows<R1, R2> + DimEq<C1, R3> + DimEq<C2, C3>,
{
self.xx_mul_to(rhs, out, |a, b| a.dot(b))
}
/// Equivalent to `self.adjoint() * rhs` but stores the result into `out` to avoid
/// allocations.
#[inline]
pub fn ad_mul_to<R2: Dim, C2: Dim, SB, R3: Dim, C3: Dim, SC>(
&self,
rhs: &Matrix<N, R2, C2, SB>,
out: &mut Matrix<N, R3, C3, SC>,
) where
N: Complex,
SB: Storage<N, R2, C2>,
SC: StorageMut<N, R3, C3>,
ShapeConstraint: SameNumberOfRows<R1, R2> + DimEq<C1, R3> + DimEq<C2, C3>,
{
self.xx_mul_to(rhs, out, |a, b| a.dotc(b))
}
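Note: a hedged sketch relating `ad_mul` to an explicit adjoint product, assuming `num_complex::Complex<f64>` entries. `ad_mul` produces `self.adjoint() * rhs` without materializing the adjoint:

```rust
use num_complex::Complex;
use nalgebra::Matrix2;

let a = Matrix2::new(Complex::new(1.0, 1.0), Complex::new(2.0, 0.0),
                     Complex::new(0.0, 3.0), Complex::new(4.0, -1.0));
let b = Matrix2::new(Complex::new(1.0, 0.0), Complex::new(0.0, 1.0),
                     Complex::new(2.0, 2.0), Complex::new(1.0, -1.0));

// Same result as forming the adjoint first, without the intermediate allocation.
assert_eq!(a.ad_mul(&b), a.adjoint() * b);
```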
/// Equivalent to `self * rhs` but stores the result into `out` to avoid allocations. /// Equivalent to `self * rhs` but stores the result into `out` to avoid allocations.
#[inline] #[inline]
pub fn mul_to<R2: Dim, C2: Dim, SB, R3: Dim, C3: Dim, SC>( pub fn mul_to<R2: Dim, C2: Dim, SB, R3: Dim, C3: Dim, SC>(
@ -760,35 +806,16 @@ where
} }
} }
// XXX: avoid code duplication. impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
impl<N: Complex, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> { #[inline(always)]
/// Returns the absolute value of the component with the largest absolute value. fn xcmp<N2>(&self, abs: impl Fn(N) -> N2, cmp: impl Fn(N2, N2) -> bool) -> N2
#[inline] where N2: Scalar + PartialOrd + Zero {
pub fn camax(&self) -> N::Real { let mut max = N2::zero();
let mut max = N::Real::zero();
for e in self.iter() { for e in self.iter() {
let ae = e.asum(); let ae = abs(*e);
if ae > max { if cmp(ae, max) {
max = ae;
}
}
max
}
}
impl<N: Scalar + PartialOrd + Signed, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Returns the absolute value of the component with the largest absolute value.
#[inline]
pub fn amax(&self) -> N {
let mut max = N::zero();
for e in self.iter() {
let ae = e.abs();
if ae > max {
max = ae; max = ae;
} }
} }
@ -796,61 +823,45 @@ impl<N: Scalar + PartialOrd + Signed, R: Dim, C: Dim, S: Storage<N, R, C>> Matri
max max
} }
/// Returns the absolute value of the component with the smallest absolute value. /// Returns the absolute value of the component with the largest absolute value.
#[inline] #[inline]
pub fn amin(&self) -> N { pub fn amax(&self) -> N
let mut it = self.iter(); where N: PartialOrd + Signed {
let mut min = it self.xcmp(|e| e.abs(), |a, b| a > b)
.next()
.expect("amin: empty matrices not supported.")
.abs();
for e in it {
let ae = e.abs();
if ae < min {
min = ae;
}
} }
min /// Returns the 1-norm of the complex component with the largest 1-norm.
#[inline]
pub fn camax(&self) -> N::Real
where N: Complex {
self.xcmp(|e| e.norm1(), |a, b| a > b)
} }
/// Returns the component with the largest value. /// Returns the component with the largest value.
#[inline] #[inline]
pub fn max(&self) -> N { pub fn max(&self) -> N
let mut it = self.iter(); where N: PartialOrd + Signed {
let mut max = it self.xcmp(|e| e, |a, b| a > b)
.next()
.expect("max: empty matrices not supported.");
for e in it {
let ae = e;
if ae > max {
max = ae;
}
} }
*max /// Returns the absolute value of the component with the smallest absolute value.
#[inline]
pub fn amin(&self) -> N
where N: PartialOrd + Signed {
self.xcmp(|e| e.abs(), |a, b| a < b)
}
/// Returns the 1-norm of the complex component with the smallest 1-norm.
#[inline]
pub fn camin(&self) -> N::Real
where N: Complex {
self.xcmp(|e| e.norm1(), |a, b| a < b)
} }
/// Returns the component with the smallest value. /// Returns the component with the smallest value.
#[inline] #[inline]
pub fn min(&self) -> N { pub fn min(&self) -> N
let mut it = self.iter(); where N: PartialOrd + Signed {
let mut min = it self.xcmp(|e| e, |a, b| a < b)
.next()
.expect("min: empty matrices not supported.");
for e in it {
let ae = e;
if ae < min {
min = ae;
}
}
*min
} }
} }
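Note: a hedged sketch contrasting `amax` (real absolute values) with `camax` (complex 1-norms), assuming `num_complex::Complex<f64>` for the complex case:

```rust
use num_complex::Complex;
use nalgebra::Vector2;

let v = Vector2::new(-7.0f64, 3.0);
assert_eq!(v.amax(), 7.0);

let c = Vector2::new(Complex::new(1.0, -3.0), Complex::new(0.5, 2.0));
// The 1-norms are 4.0 and 2.5; camax returns the largest.
assert_eq!(c.camax(), 4.0);
```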

View File

@ -97,8 +97,7 @@ impl<N: Complex, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
N::Epsilon: Copy, N::Epsilon: Copy,
DefaultAllocator: Allocator<N, R, C> + Allocator<N, C, C>, DefaultAllocator: Allocator<N, R, C> + Allocator<N, C, C>,
{ {
// FIXME: add a conjugate-transpose-mul (self.ad_mul(self)).is_identity(eps)
(self.conjugate().tr_mul(self)).is_identity(eps)
} }
} }

View File

@ -31,7 +31,7 @@ where DefaultAllocator: Allocator<N, D, D>
/// random reals generators. /// random reals generators.
pub fn new<Rand: FnMut() -> N>(dim: D, mut rand: Rand) -> Self { pub fn new<Rand: FnMut() -> N>(dim: D, mut rand: Rand) -> Self {
let mut m = RandomOrthogonal::new(dim, || rand()).unwrap(); let mut m = RandomOrthogonal::new(dim, || rand()).unwrap();
let mt = m.conjugate_transpose(); let mt = m.adjoint();
for i in 0..dim.value() { for i in 0..dim.value() {
let mut col = m.column_mut(i); let mut col = m.column_mut(i);

View File

@ -35,7 +35,7 @@ impl<N: Complex, D: Dim, S: Storage<N, D>> Reflection<N, D, S> {
D: DimName, D: DimName,
DefaultAllocator: Allocator<N, D>, DefaultAllocator: Allocator<N, D>,
{ {
let bias = axis.cdot(&pt.coords); let bias = axis.dotc(&pt.coords);
Self::new(axis, bias) Self::new(axis, bias)
} }
@ -56,7 +56,7 @@ impl<N: Complex, D: Dim, S: Storage<N, D>> Reflection<N, D, S> {
// dot product, and then mutably. Somehow, this allows significantly // dot product, and then mutably. Somehow, this allows significantly
// better optimizations of the dot product from the compiler. // better optimizations of the dot product from the compiler.
let m_two: N = ::convert(-2.0f64); let m_two: N = ::convert(-2.0f64);
let factor = (self.axis.cdot(&rhs.column(i)) - self.bias) * m_two; let factor = (self.axis.dotc(&rhs.column(i)) - self.bias) * m_two;
rhs.column_mut(i).axpy(factor, &self.axis, N::one()); rhs.column_mut(i).axpy(factor, &self.axis, N::one());
} }
} }
@ -73,7 +73,7 @@ impl<N: Complex, D: Dim, S: Storage<N, D>> Reflection<N, D, S> {
// dot product, and then mutably. Somehow, this allows significantly // dot product, and then mutably. Somehow, this allows significantly
// better optimizations of the dot product from the compiler. // better optimizations of the dot product from the compiler.
let m_two = sign.scale(::convert(-2.0f64)); let m_two = sign.scale(::convert(-2.0f64));
let factor = (self.axis.cdot(&rhs.column(i)) - self.bias) * m_two; let factor = (self.axis.dotc(&rhs.column(i)) - self.bias) * m_two;
rhs.column_mut(i).axpy(factor, &self.axis, sign); rhs.column_mut(i).axpy(factor, &self.axis, sign);
} }
} }

View File

@ -170,6 +170,7 @@ pub use alga::general::{Id, Real, Complex};
/// Gets the ubiquitous multiplicative identity element. /// Gets the ubiquitous multiplicative identity element.
/// ///
/// Same as `Id::new()`. /// Same as `Id::new()`.
#[deprecated(note = "use `Id::new()` instead.")]
#[inline] #[inline]
pub fn id() -> Id { pub fn id() -> Id {
Id::new() Id::new()
@ -416,6 +417,7 @@ pub fn partial_sort2<'a, T: PartialOrd>(a: &'a T, b: &'a T) -> Option<(&'a T, &'
/// # See also: /// # See also:
/// ///
/// * [`inverse`](fn.inverse.html) /// * [`inverse`](fn.inverse.html)
#[deprecated(note = "use the `.try_inverse()` method instead")]
#[inline] #[inline]
pub fn try_inverse<M: AlgaSquareMatrix>(m: &M) -> Option<M> { pub fn try_inverse<M: AlgaSquareMatrix>(m: &M) -> Option<M> {
m.try_inverse() m.try_inverse()
@ -426,6 +428,7 @@ pub fn try_inverse<M: AlgaSquareMatrix>(m: &M) -> Option<M> {
/// # See also: /// # See also:
/// ///
/// * [`try_inverse`](fn.try_inverse.html) /// * [`try_inverse`](fn.try_inverse.html)
#[deprecated(note = "use the `.inverse()` method instead")]
#[inline] #[inline]
pub fn inverse<M: TwoSidedInverse<Multiplicative>>(m: &M) -> M { pub fn inverse<M: TwoSidedInverse<Multiplicative>>(m: &M) -> M {
m.two_sided_inverse() m.two_sided_inverse()

View File

@ -121,7 +121,7 @@ where DefaultAllocator: Allocator<N, D, D>
ShapeConstraint: SameNumberOfRows<R2, D>, ShapeConstraint: SameNumberOfRows<R2, D>,
{ {
let _ = self.chol.solve_lower_triangular_mut(b); let _ = self.chol.solve_lower_triangular_mut(b);
let _ = self.chol.conjugate().tr_solve_lower_triangular_mut(b); let _ = self.chol.ad_solve_lower_triangular_mut(b);
} }
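Note: the two calls above are the forward and adjoint triangular sweeps with the same Cholesky factor `L` (`L y = b`, then `L^H x = y`). A hedged usage sketch with a real symmetric positive-definite matrix, where the adjoint sweep reduces to the transposed one:

```rust
use nalgebra::{Matrix2, Vector2};

let m = Matrix2::new(4.0, 2.0,
                     2.0, 3.0);
let b = Vector2::new(1.0, 2.0);

let chol = m.cholesky().expect("m is symmetric positive-definite");
let x = chol.solve(&b);
assert!((m * x - b).norm() < 1.0e-10);
```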
/// Returns the solution of the system `self * x = b` where `self` is the decomposed matrix and /// Returns the solution of the system `self * x = b` where `self` is the decomposed matrix and

View File

@ -30,6 +30,6 @@ pub use self::lu::*;
pub use self::permutation_sequence::*; pub use self::permutation_sequence::*;
pub use self::qr::*; pub use self::qr::*;
pub use self::schur::*; pub use self::schur::*;
//pub use self::svd::*; pub use self::svd::*;
pub use self::symmetric_eigen::*; pub use self::symmetric_eigen::*;
pub use self::symmetric_tridiagonal::*; pub use self::symmetric_tridiagonal::*;

View File

@ -4,7 +4,7 @@ use base::allocator::Allocator;
use base::constraint::{SameNumberOfRows, ShapeConstraint}; use base::constraint::{SameNumberOfRows, ShapeConstraint};
use base::dimension::{Dim, U1}; use base::dimension::{Dim, U1};
use base::storage::{Storage, StorageMut}; use base::storage::{Storage, StorageMut};
use base::{DefaultAllocator, Matrix, MatrixMN, SquareMatrix, Vector}; use base::{DefaultAllocator, Matrix, MatrixMN, SquareMatrix, Vector, DVectorSlice};
impl<N: Complex, D: Dim, S: Storage<N, D, D>> SquareMatrix<N, D, S> { impl<N: Complex, D: Dim, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
/// Computes the solution of the linear system `self . x = b` where `x` is the unknown and only /// Computes the solution of the linear system `self . x = b` where `x` is the unknown and only
@ -180,10 +180,9 @@ impl<N: Complex, D: Dim, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
/* /*
* *
* Transpose versions * Transpose and adjoint versions
* *
*/ */
/// Computes the solution of the linear system `self.transpose() . x = b` where `x` is the unknown and only /// Computes the solution of the linear system `self.transpose() . x = b` where `x` is the unknown and only
/// the lower-triangular part of `self` (including the diagonal) is considered not-zero. /// the lower-triangular part of `self` (including the diagonal) is considered not-zero.
#[inline] #[inline]
@ -237,7 +236,7 @@ impl<N: Complex, D: Dim, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
let cols = b.ncols(); let cols = b.ncols();
for i in 0..cols { for i in 0..cols {
if !self.tr_solve_lower_triangular_vector_mut(&mut b.column_mut(i)) { if !self.xx_solve_lower_triangular_vector_mut(&mut b.column_mut(i), |e| e, |a, b| a.dot(b)) {
return false; return false;
} }
} }
@ -245,32 +244,6 @@ impl<N: Complex, D: Dim, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
true true
} }
fn tr_solve_lower_triangular_vector_mut<R2: Dim, S2>(&self, b: &mut Vector<N, R2, S2>) -> bool
where
S2: StorageMut<N, R2, U1>,
ShapeConstraint: SameNumberOfRows<R2, D>,
{
let dim = self.nrows();
for i in (0..dim).rev() {
let dot = self.slice_range(i + 1.., i).dot(&b.slice_range(i + 1.., 0));
unsafe {
let b_i = b.vget_unchecked_mut(i);
let diag = *self.get_unchecked((i, i));
if diag.is_zero() {
return false;
}
*b_i = (*b_i - dot) / diag;
}
}
true
}
/// Solves the linear system `self.transpose() . x = b` where `x` is the unknown and only the /// Solves the linear system `self.transpose() . x = b` where `x` is the unknown and only the
/// upper-triangular part of `self` (including the diagonal) is considered not-zero. /// upper-triangular part of `self` (including the diagonal) is considered not-zero.
pub fn tr_solve_upper_triangular_mut<R2: Dim, C2: Dim, S2>( pub fn tr_solve_upper_triangular_mut<R2: Dim, C2: Dim, S2>(
@ -284,7 +257,7 @@ impl<N: Complex, D: Dim, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
let cols = b.ncols(); let cols = b.ncols();
for i in 0..cols { for i in 0..cols {
if !self.tr_solve_upper_triangular_vector_mut(&mut b.column_mut(i)) { if !self.xx_solve_upper_triangular_vector_mut(&mut b.column_mut(i), |e| e, |a, b| a.dot(b)) {
return false; return false;
} }
} }
@ -292,7 +265,128 @@ impl<N: Complex, D: Dim, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
true true
} }
fn tr_solve_upper_triangular_vector_mut<R2: Dim, S2>(&self, b: &mut Vector<N, R2, S2>) -> bool /// Computes the solution of the linear system `self.adjoint() . x = b` where `x` is the unknown and only
/// the lower-triangular part of `self` (including the diagonal) is considered not-zero.
#[inline]
pub fn ad_solve_lower_triangular<R2: Dim, C2: Dim, S2>(
&self,
b: &Matrix<N, R2, C2, S2>,
) -> Option<MatrixMN<N, R2, C2>>
where
S2: StorageMut<N, R2, C2>,
DefaultAllocator: Allocator<N, R2, C2>,
ShapeConstraint: SameNumberOfRows<R2, D>,
{
let mut res = b.clone_owned();
if self.ad_solve_lower_triangular_mut(&mut res) {
Some(res)
} else {
None
}
}
/// Computes the solution of the linear system `self.adjoint() . x = b` where `x` is the unknown and only
/// the upper-triangular part of `self` (including the diagonal) is considered not-zero.
#[inline]
pub fn ad_solve_upper_triangular<R2: Dim, C2: Dim, S2>(
&self,
b: &Matrix<N, R2, C2, S2>,
) -> Option<MatrixMN<N, R2, C2>>
where
S2: StorageMut<N, R2, C2>,
DefaultAllocator: Allocator<N, R2, C2>,
ShapeConstraint: SameNumberOfRows<R2, D>,
{
let mut res = b.clone_owned();
if self.ad_solve_upper_triangular_mut(&mut res) {
Some(res)
} else {
None
}
}
/// Solves the linear system `self.adjoint() . x = b` where `x` is the unknown and only the
/// lower-triangular part of `self` (including the diagonal) is considered not-zero.
pub fn ad_solve_lower_triangular_mut<R2: Dim, C2: Dim, S2>(
&self,
b: &mut Matrix<N, R2, C2, S2>,
) -> bool
where
S2: StorageMut<N, R2, C2>,
ShapeConstraint: SameNumberOfRows<R2, D>,
{
let cols = b.ncols();
for i in 0..cols {
if !self.xx_solve_lower_triangular_vector_mut(&mut b.column_mut(i), |e| e.conjugate(), |a, b| a.dotc(b)) {
return false;
}
}
true
}
/// Solves the linear system `self.adjoint() . x = b` where `x` is the unknown and only the
/// upper-triangular part of `self` (including the diagonal) is considered not-zero.
pub fn ad_solve_upper_triangular_mut<R2: Dim, C2: Dim, S2>(
&self,
b: &mut Matrix<N, R2, C2, S2>,
) -> bool
where
S2: StorageMut<N, R2, C2>,
ShapeConstraint: SameNumberOfRows<R2, D>,
{
let cols = b.ncols();
for i in 0..cols {
if !self.xx_solve_upper_triangular_vector_mut(&mut b.column_mut(i), |e| e.conjugate(), |a, b| a.dotc(b)) {
return false;
}
}
true
}
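Note: a hedged sketch of the adjoint triangular solve, assuming real entries so that the adjoint is just the transpose. Only the lower triangle of `self` is read:

```rust
use nalgebra::{Matrix2, Vector2};

let l = Matrix2::new(2.0, 0.0,
                     1.0, 3.0);
let b = Vector2::new(4.0, 6.0);

// Solves l.adjoint() * x = b without forming the adjoint explicitly.
let x = l.ad_solve_lower_triangular(&b).unwrap();
assert_eq!(l.adjoint() * x, b);
```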
#[inline(always)]
fn xx_solve_lower_triangular_vector_mut<R2: Dim, S2>(
&self,
b: &mut Vector<N, R2, S2>,
conjugate: impl Fn(N) -> N,
dot: impl Fn(&DVectorSlice<N, S::RStride, S::CStride>, &DVectorSlice<N, S2::RStride, S2::CStride>) -> N,
) -> bool
where
S2: StorageMut<N, R2, U1>,
ShapeConstraint: SameNumberOfRows<R2, D>,
{
let dim = self.nrows();
for i in (0..dim).rev() {
let dot = dot(&self.slice_range(i + 1.., i), &b.slice_range(i + 1.., 0));
unsafe {
let b_i = b.vget_unchecked_mut(i);
let diag = conjugate(*self.get_unchecked((i, i)));
if diag.is_zero() {
return false;
}
*b_i = (*b_i - dot) / diag;
}
}
true
}
#[inline(always)]
fn xx_solve_upper_triangular_vector_mut<R2: Dim, S2>(
&self,
b: &mut Vector<N, R2, S2>,
conjugate: impl Fn(N) -> N,
dot: impl Fn(&DVectorSlice<N, S::RStride, S::CStride>, &DVectorSlice<N, S2::RStride, S2::CStride>) -> N,
) -> bool
where where
S2: StorageMut<N, R2, U1>, S2: StorageMut<N, R2, U1>,
ShapeConstraint: SameNumberOfRows<R2, D>, ShapeConstraint: SameNumberOfRows<R2, D>,
@ -300,11 +394,11 @@ impl<N: Complex, D: Dim, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
let dim = self.nrows(); let dim = self.nrows();
for i in 0..dim { for i in 0..dim {
let dot = self.slice_range(..i, i).dot(&b.slice_range(..i, 0)); let dot = dot(&self.slice_range(..i, i), &b.slice_range(..i, 0));
unsafe { unsafe {
let b_i = b.vget_unchecked_mut(i); let b_i = b.vget_unchecked_mut(i);
let diag = *self.get_unchecked((i, i)); let diag = conjugate(*self.get_unchecked((i, i)));
if diag.is_zero() { if diag.is_zero() {
return false; return false;

View File

@ -496,7 +496,7 @@ where
} }
} }
self.recompose().map(|m| m.conjugate_transpose()) self.recompose().map(|m| m.adjoint())
} }
} }
@ -521,7 +521,7 @@ where
else { else {
match (&self.u, &self.v_t) { match (&self.u, &self.v_t) {
(Some(u), Some(v_t)) => { (Some(u), Some(v_t)) => {
let mut ut_b = u.conjugate().tr_mul(b); let mut ut_b = u.ad_mul(b);
for j in 0..ut_b.ncols() { for j in 0..ut_b.ncols() {
let mut col = ut_b.column_mut(j); let mut col = ut_b.column_mut(j);
@ -536,7 +536,7 @@ where
} }
} }
Ok(v_t.conjugate().tr_mul(&ut_b)) Ok(v_t.ad_mul(&ut_b))
} }
(None, None) => Err("SVD solve: U and V^t have not been computed."), (None, None) => Err("SVD solve: U and V^t have not been computed."),
(None, _) => Err("SVD solve: U has not been computed."), (None, _) => Err("SVD solve: U has not been computed."),

View File

@ -75,13 +75,13 @@ where DefaultAllocator: Allocator<N, D, D> + Allocator<N, DimDiff<D, U1>>
if not_zero { if not_zero {
let mut p = p.rows_range_mut(i..); let mut p = p.rows_range_mut(i..);
p.cgemv_symm(::convert(2.0), &m, &axis, N::zero()); p.hegemv(::convert(2.0), &m, &axis, N::zero());
let dot = axis.cdot(&p); let dot = axis.dotc(&p);
// p.axpy(-dot, &axis.conjugate(), N::one()); // p.axpy(-dot, &axis.conjugate(), N::one());
m.ger_symm(-N::one(), &p, &axis.conjugate(), N::one()); m.hegerc(-N::one(), &p, &axis, N::one());
m.ger_symm(-N::one(), &axis, &p.conjugate(), N::one()); m.hegerc(-N::one(), &axis, &p, N::one());
m.ger_symm(dot * ::convert(2.0), &axis, &axis.conjugate(), N::one()); m.hegerc(dot * ::convert(2.0), &axis, &axis, N::one());
} }
} }
@ -142,7 +142,7 @@ where DefaultAllocator: Allocator<N, D, D> + Allocator<N, DimDiff<D, U1>>
self.tri[(i, i + 1)] = val; self.tri[(i, i + 1)] = val;
} }
&q * self.tri * q.conjugate_transpose() &q * self.tri * q.adjoint()
} }
} }

View File

@ -19,14 +19,14 @@ quickcheck! {
let mut y2 = y1.clone(); let mut y2 = y1.clone();
y1.gemv(alpha, &a, &x, beta); y1.gemv(alpha, &a, &x, beta);
y2.gemv_symm(alpha, &a.lower_triangle(), &x, beta); y2.sygemv(alpha, &a.lower_triangle(), &x, beta);
if !relative_eq!(y1, y2, epsilon = 1.0e-10) { if !relative_eq!(y1, y2, epsilon = 1.0e-10) {
return false; return false;
} }
y1.gemv(alpha, &a, &x, 0.0); y1.gemv(alpha, &a, &x, 0.0);
y2.gemv_symm(alpha, &a.lower_triangle(), &x, 0.0); y2.sygemv(alpha, &a.lower_triangle(), &x, 0.0);
relative_eq!(y1, y2, epsilon = 1.0e-10) relative_eq!(y1, y2, epsilon = 1.0e-10)
} }
@ -61,14 +61,14 @@ quickcheck! {
let y = DVector::new_random(n); let y = DVector::new_random(n);
a1.ger(alpha, &x, &y, beta); a1.ger(alpha, &x, &y, beta);
a2.ger_symm(alpha, &x, &y, beta); a2.syger(alpha, &x, &y, beta);
if !relative_eq!(a1.lower_triangle(), a2) { if !relative_eq!(a1.lower_triangle(), a2) {
return false; return false;
} }
a1.ger(alpha, &x, &y, 0.0); a1.ger(alpha, &x, &y, 0.0);
a2.ger_symm(alpha, &x, &y, 0.0); a2.syger(alpha, &x, &y, 0.0);
relative_eq!(a1.lower_triangle(), a2) relative_eq!(a1.lower_triangle(), a2)
} }

View File

@ -16,7 +16,7 @@ macro_rules! gen_tests(
fn cholesky(n: usize) -> bool { fn cholesky(n: usize) -> bool {
let m = RandomSDP::new(Dynamic::new(n.max(1).min(50)), || random::<$scalar>().0).unwrap(); let m = RandomSDP::new(Dynamic::new(n.max(1).min(50)), || random::<$scalar>().0).unwrap();
let l = m.clone().cholesky().unwrap().unpack(); let l = m.clone().cholesky().unwrap().unpack();
relative_eq!(m, &l * l.conjugate_transpose(), epsilon = 1.0e-7) relative_eq!(m, &l * l.adjoint(), epsilon = 1.0e-7)
} }
fn cholesky_static(_m: RandomSDP<f64, U4>) -> bool { fn cholesky_static(_m: RandomSDP<f64, U4>) -> bool {
@ -24,7 +24,7 @@ macro_rules! gen_tests(
let chol = m.cholesky().unwrap(); let chol = m.cholesky().unwrap();
let l = chol.unpack(); let l = chol.unpack();
if !relative_eq!(m, &l * l.conjugate_transpose(), epsilon = 1.0e-7) { if !relative_eq!(m, &l * l.adjoint(), epsilon = 1.0e-7) {
false false
} }
else { else {

View File

@ -27,21 +27,21 @@ macro_rules! gen_tests(
let hess = m.clone().hessenberg(); let hess = m.clone().hessenberg();
let (p, h) = hess.unpack(); let (p, h) = hess.unpack();
relative_eq!(m, &p * h * p.conjugate_transpose(), epsilon = 1.0e-7) relative_eq!(m, &p * h * p.adjoint(), epsilon = 1.0e-7)
} }
fn hessenberg_static_mat2(m: Matrix2<$scalar>) -> bool { fn hessenberg_static_mat2(m: Matrix2<$scalar>) -> bool {
let m = m.map(|e| e.0); let m = m.map(|e| e.0);
let hess = m.hessenberg(); let hess = m.hessenberg();
let (p, h) = hess.unpack(); let (p, h) = hess.unpack();
relative_eq!(m, p * h * p.conjugate_transpose(), epsilon = 1.0e-7) relative_eq!(m, p * h * p.adjoint(), epsilon = 1.0e-7)
} }
fn hessenberg_static(m: Matrix4<$scalar>) -> bool { fn hessenberg_static(m: Matrix4<$scalar>) -> bool {
let m = m.map(|e| e.0); let m = m.map(|e| e.0);
let hess = m.hessenberg(); let hess = m.hessenberg();
let (p, h) = hess.unpack(); let (p, h) = hess.unpack();
relative_eq!(m, p * h * p.conjugate_transpose(), epsilon = 1.0e-7) relative_eq!(m, p * h * p.adjoint(), epsilon = 1.0e-7)
} }
} }
} }

View File

@ -31,21 +31,21 @@ mod quickcheck_tests {
let (vecs, vals) = m.clone().schur().unpack(); let (vecs, vals) = m.clone().schur().unpack();
if !relative_eq!(&vecs * &vals * vecs.conjugate_transpose(), m, epsilon = 1.0e-7) { if !relative_eq!(&vecs * &vals * vecs.adjoint(), m, epsilon = 1.0e-7) {
println!("{:.5}{:.5}", m, &vecs * &vals * vecs.conjugate_transpose()); println!("{:.5}{:.5}", m, &vecs * &vals * vecs.adjoint());
} }
relative_eq!(&vecs * vals * vecs.conjugate_transpose(), m, epsilon = 1.0e-7) relative_eq!(&vecs * vals * vecs.adjoint(), m, epsilon = 1.0e-7)
} }
fn schur_static_mat2(m: Matrix2<$scalar>) -> bool { fn schur_static_mat2(m: Matrix2<$scalar>) -> bool {
let m = m.map(|e| e.0); let m = m.map(|e| e.0);
let (vecs, vals) = m.clone().schur().unpack(); let (vecs, vals) = m.clone().schur().unpack();
let ok = relative_eq!(vecs * vals * vecs.conjugate_transpose(), m, epsilon = 1.0e-7); let ok = relative_eq!(vecs * vals * vecs.adjoint(), m, epsilon = 1.0e-7);
if !ok { if !ok {
println!("Vecs: {:.5} Vals: {:.5}", vecs, vals); println!("Vecs: {:.5} Vals: {:.5}", vecs, vals);
println!("Reconstruction:{}{}", m, &vecs * &vals * vecs.conjugate_transpose()); println!("Reconstruction:{}{}", m, &vecs * &vals * vecs.adjoint());
} }
ok ok
} }
@ -54,10 +54,10 @@ mod quickcheck_tests {
let m = m.map(|e| e.0); let m = m.map(|e| e.0);
let (vecs, vals) = m.clone().schur().unpack(); let (vecs, vals) = m.clone().schur().unpack();
let ok = relative_eq!(vecs * vals * vecs.conjugate_transpose(), m, epsilon = 1.0e-7); let ok = relative_eq!(vecs * vals * vecs.adjoint(), m, epsilon = 1.0e-7);
if !ok { if !ok {
println!("Vecs: {:.5} Vals: {:.5}", vecs, vals); println!("Vecs: {:.5} Vals: {:.5}", vecs, vals);
println!("{:.5}{:.5}", m, &vecs * &vals * vecs.conjugate_transpose()); println!("{:.5}{:.5}", m, &vecs * &vals * vecs.adjoint());
} }
ok ok
} }
@ -66,9 +66,9 @@ mod quickcheck_tests {
let m = m.map(|e| e.0); let m = m.map(|e| e.0);
let (vecs, vals) = m.clone().schur().unpack(); let (vecs, vals) = m.clone().schur().unpack();
let ok = relative_eq!(vecs * vals * vecs.conjugate_transpose(), m, epsilon = 1.0e-7); let ok = relative_eq!(vecs * vals * vecs.adjoint(), m, epsilon = 1.0e-7);
if !ok { if !ok {
println!("{:.5}{:.5}", m, &vecs * &vals * vecs.conjugate_transpose()); println!("{:.5}{:.5}", m, &vecs * &vals * vecs.adjoint());
} }
ok ok

View File

@ -10,7 +10,7 @@ macro_rules! gen_tests(
fn unzero_diagonal<N: Complex>(a: &mut Matrix4<N>) { fn unzero_diagonal<N: Complex>(a: &mut Matrix4<N>) {
for i in 0..4 { for i in 0..4 {
if a[(i, i)].asum() < na::convert(1.0e-7) { if a[(i, i)].norm1() < na::convert(1.0e-7) {
a[(i, i)] = N::one(); a[(i, i)] = N::one();
} }
} }