From ce24ea972ef51d0319b3e0b08c39716d8ab4c1f9 Mon Sep 17 00:00:00 2001 From: sebcrozet Date: Sat, 23 Mar 2019 14:13:00 +0100 Subject: [PATCH] Remove all spurious allocation introduced by complex number support on decompositions. --- nalgebra-lapack/src/lu.rs | 2 +- src/base/blas.rs | 12 +++--- src/base/edition.rs | 23 ++++++++-- src/base/matrix.rs | 37 +++++++++------- src/base/matrix_alga.rs | 2 +- src/base/norm.rs | 14 ++++--- src/geometry/reflection.rs | 4 +- src/geometry/unit_complex_ops.rs | 65 ++--------------------------- src/linalg/bidiagonal.rs | 10 +---- src/linalg/cholesky.rs | 3 +- src/linalg/hessenberg.rs | 13 +++--- src/linalg/qr.rs | 8 +--- src/linalg/schur.rs | 8 ++-- src/linalg/svd.rs | 10 ++--- src/linalg/symmetric_eigen.rs | 8 ++-- src/linalg/symmetric_tridiagonal.rs | 3 +- 16 files changed, 87 insertions(+), 135 deletions(-) diff --git a/nalgebra-lapack/src/lu.rs b/nalgebra-lapack/src/lu.rs index dc21c12b..21fdfb41 100644 --- a/nalgebra-lapack/src/lu.rs +++ b/nalgebra-lapack/src/lu.rs @@ -253,7 +253,7 @@ where /// be determined. /// /// Returns `false` if no solution was found (the decomposed matrix is singular). - pub fn solve_conjugate_transpose_mut( + pub fn solve_adjoint_mut( &self, b: &mut MatrixMN, ) -> bool diff --git a/src/base/blas.rs b/src/base/blas.rs index 210db8cd..9f919e11 100644 --- a/src/base/blas.rs +++ b/src/base/blas.rs @@ -585,7 +585,7 @@ where a: &SquareMatrix, x: &Vector, beta: N, - dotc: impl Fn(&DVectorSlice, &DVectorSlice) -> N, + dot: impl Fn(&DVectorSlice, &DVectorSlice) -> N, ) where N: One, SB: Storage, @@ -613,11 +613,11 @@ where let col2 = a.column(0); let val = unsafe { *x.vget_unchecked(0) }; self.axpy(alpha * val, &col2, beta); - self[0] += alpha * dotc(&a.slice_range(1.., 0), &x.rows_range(1..)); + self[0] += alpha * dot(&a.slice_range(1.., 0), &x.rows_range(1..)); for j in 1..dim2 { let col2 = a.column(j); - let dot = dotc(&col2.rows_range(j..), &x.rows_range(j..)); + let dot = dot(&col2.rows_range(j..), &x.rows_range(j..)); let val; unsafe { @@ -1083,7 +1083,7 @@ impl> Matrix where N: Scalar + Zero + ClosedAdd + ClosedMul { #[inline(always)] - fn sygerx( + fn xxgerx( &mut self, alpha: N, x: &Vector, @@ -1186,7 +1186,7 @@ where N: Scalar + Zero + ClosedAdd + ClosedMul SC: Storage, ShapeConstraint: DimEq + DimEq, { - self.sygerx(alpha, x, y, beta, |e| e) + self.xxgerx(alpha, x, y, beta, |e| e) } /// Computes `self = alpha * x * y.transpose() + beta * self`, where `self` is a **symmetric** @@ -1221,7 +1221,7 @@ where N: Scalar + Zero + ClosedAdd + ClosedMul SC: Storage, ShapeConstraint: DimEq + DimEq, { - self.sygerx(alpha, x, y, beta, Complex::conjugate) + self.xxgerx(alpha, x, y, beta, Complex::conjugate) } } diff --git a/src/base/edition.rs b/src/base/edition.rs index 32e2c1aa..b95e0fcf 100644 --- a/src/base/edition.rs +++ b/src/base/edition.rs @@ -137,10 +137,10 @@ impl> Matrix { /// Fills the diagonal of this matrix with the content of the given vector. #[inline] pub fn set_diagonal(&mut self, diag: &Vector) - where - R: DimMin, - S2: Storage, - ShapeConstraint: DimEq, R2>, + where + R: DimMin, + S2: Storage, + ShapeConstraint: DimEq, R2>, { let (nrows, ncols) = self.shape(); let min_nrows_ncols = cmp::min(nrows, ncols); @@ -151,6 +151,21 @@ impl> Matrix { } } + /// Fills the diagonal of this matrix with the content of the given iterator. 
+ /// + /// This will fill as many diagonal elements as the iterator yields, up to the + /// minimum of the number of rows and columns of `self`, and starting with the + /// diagonal element at index (0, 0). + #[inline] + pub fn set_partial_diagonal(&mut self, diag: impl Iterator) { + let (nrows, ncols) = self.shape(); + let min_nrows_ncols = cmp::min(nrows, ncols); + + for (i, val) in diag.enumerate().take(min_nrows_ncols) { + unsafe { *self.get_unchecked_mut((i, i)) = val } + } + } + /// Fills the selected row of this matrix with the content of the given vector. #[inline] pub fn set_row(&mut self, i: usize, row: &RowVector) diff --git a/src/base/matrix.rs b/src/base/matrix.rs index 6a89e945..77d49f42 100644 --- a/src/base/matrix.rs +++ b/src/base/matrix.rs @@ -981,14 +981,14 @@ impl> Matrix { self.map(|e| e.conjugate()) } - /// Divides each component of `self` by the given real. + /// Divides each component of the complex matrix `self` by the given real. #[inline] pub fn unscale(&self, real: N::Real) -> MatrixMN where DefaultAllocator: Allocator { self.map(|e| e.unscale(real)) } - /// Multiplies each component of `self` by the given real. + /// Multiplies each component of the complex matrix `self` by the given real. #[inline] pub fn scale(&self, real: N::Real) -> MatrixMN where DefaultAllocator: Allocator { @@ -997,19 +997,19 @@ impl> Matrix { } impl> Matrix { - /// The conjugate of `self` computed in-place. + /// The conjugate of the complex matrix `self` computed in-place. #[inline] pub fn conjugate_mut(&mut self) { self.apply(|e| e.conjugate()) } - /// Divides each component of `self` by the given real. + /// Divides each component of the complex matrix `self` by the given real. #[inline] pub fn unscale_mut(&mut self, real: N::Real) { self.apply(|e| e.unscale(real)) } - /// Multiplies each component of `self` by the given real. + /// Multiplies each component of the complex matrix `self` by the given real. #[inline] pub fn scale_mut(&mut self, real: N::Real) { self.apply(|e| e.scale(real)) @@ -1017,8 +1017,14 @@ impl> Matrix { } impl> Matrix { - /// Sets `self` to its conjugate transpose. - pub fn conjugate_transpose_mut(&mut self) { + /// Sets `self` to its adjoint. + #[deprecated(note = "Renamed to `self.adjoint_mut()`.")] + pub fn conjugate_transform_mut(&mut self) { + self.adjoint_mut() + } + + /// Sets `self` to its adjoint (aka. conjugate-transpose). + pub fn adjoint_mut(&mut self) { assert!( self.is_square(), "Unable to transpose a non-square matrix in-place." 
@@ -1027,11 +1033,6 @@ impl> Matrix { let dim = self.shape().0; for i in 0..dim { - { - let diag = unsafe { self.get_unchecked_mut((i, i)) }; - *diag = diag.conjugate(); - } - for j in 0..i { unsafe { let ref_ij = self.get_unchecked_mut((i, j)) as *mut N; @@ -1042,6 +1043,11 @@ impl> Matrix { *ref_ji = conj_ij; } } + + { + let diag = unsafe { self.get_unchecked_mut((i, i)) }; + *diag = diag.conjugate(); + } } } } @@ -1614,10 +1620,10 @@ impl> Unit> { where DefaultAllocator: Allocator, { - let c_hang = self.dotc(rhs).real(); + let (c_hang, c_hang_sign) = self.dotc(rhs).to_exp(); // self == other - if c_hang.abs() >= N::Real::one() { + if c_hang >= N::Real::one() { return Some(Unit::new_unchecked(self.clone_owned())); } @@ -1630,7 +1636,8 @@ impl> Unit> { } else { let ta = ((N::Real::one() - t) * hang).sin() / s_hang; let tb = (t * hang).sin() / s_hang; - let res = &**self * N::from_real(ta) + &**rhs * N::from_real(tb); + let mut res = self.scale(ta); + res.axpy(c_hang_sign.scale(tb), &**rhs, N::one()); Some(Unit::new_unchecked(res)) } diff --git a/src/base/matrix_alga.rs b/src/base/matrix_alga.rs index 4d540f46..43774f17 100644 --- a/src/base/matrix_alga.rs +++ b/src/base/matrix_alga.rs @@ -269,7 +269,7 @@ where DefaultAllocator: Allocator let v = &vs[0]; let mut a; - if v[0].modulus() > v[1].modulus() { + if v[0].norm1() > v[1].norm1() { a = Self::from_column_slice(&[v[2], N::zero(), -v[0]]); } else { a = Self::from_column_slice(&[N::zero(), -v[2], v[1]]); diff --git a/src/base/norm.rs b/src/base/norm.rs index 2a50f4ec..b3d695e6 100644 --- a/src/base/norm.rs +++ b/src/base/norm.rs @@ -33,7 +33,7 @@ impl Norm for EuclideanNorm { #[inline] fn norm(&self, m: &Matrix) -> N::Real where R: Dim, C: Dim, S: Storage { - m.dotc(m).real().sqrt() + m.norm_squared().sqrt() } #[inline] @@ -43,7 +43,7 @@ impl Norm for EuclideanNorm { ShapeConstraint: SameNumberOfRows + SameNumberOfColumns { m1.zip_fold(m2, N::Real::zero(), |acc, a, b| { let diff = a - b; - acc + (diff.conjugate() * diff).real() + acc + diff.modulus_squared() }).sqrt() } } @@ -73,6 +73,8 @@ impl Norm for UniformNorm { #[inline] fn norm(&self, m: &Matrix) -> N::Real where R: Dim, C: Dim, S: Storage { + // NOTE: we don't use `m.amax()` here because for the complex + // numbers this will return the max norm1 instead of the modulus. m.fold(N::Real::zero(), |acc, a| acc.max(a.modulus())) } @@ -187,7 +189,7 @@ impl> Matrix { #[inline] pub fn normalize(&self) -> MatrixMN where DefaultAllocator: Allocator { - self.map(|e| e.unscale(self.norm())) + self.unscale(self.norm()) } /// Returns a normalized version of this matrix unless its norm as smaller or equal to `eps`. @@ -199,7 +201,7 @@ impl> Matrix { if n <= min_norm { None } else { - Some(self.map(|e| e.unscale(n))) + Some(self.unscale(n)) } } @@ -216,7 +218,7 @@ impl> Matrix { #[inline] pub fn normalize_mut(&mut self) -> N::Real { let n = self.norm(); - self.apply(|e| e.unscale(n)); + self.unscale_mut(n); n } @@ -231,7 +233,7 @@ impl> Matrix { if n <= min_norm { None } else { - self.apply(|e| e.unscale(n)); + self.unscale_mut(n); Some(n) } } diff --git a/src/geometry/reflection.rs b/src/geometry/reflection.rs index 653a9bdc..45e6381c 100644 --- a/src/geometry/reflection.rs +++ b/src/geometry/reflection.rs @@ -96,7 +96,7 @@ impl> Reflection { } let m_two: N = ::convert(-2.0f64); - lhs.ger(m_two, &work, &self.axis.conjugate(), N::one()); + lhs.gerc(m_two, &work, &self.axis, N::one()); } /// Applies the reflection to the rows of `lhs`. 
@@ -118,6 +118,6 @@ impl> Reflection { } let m_two = sign.scale(::convert(-2.0f64)); - lhs.ger(m_two, &work, &self.axis.conjugate(), sign); + lhs.gerc(m_two, &work, &self.axis, sign); } } diff --git a/src/geometry/unit_complex_ops.rs b/src/geometry/unit_complex_ops.rs index 75b45eda..28c9a9c3 100644 --- a/src/geometry/unit_complex_ops.rs +++ b/src/geometry/unit_complex_ops.rs @@ -2,10 +2,9 @@ use std::ops::{Div, DivAssign, Mul, MulAssign}; use alga::general::Real; use base::allocator::Allocator; -use base::constraint::{DimEq, ShapeConstraint}; -use base::dimension::{Dim, U1, U2}; -use base::storage::{Storage, StorageMut}; -use base::{DefaultAllocator, Matrix, Unit, Vector, Vector2}; +use base::dimension::{U1, U2}; +use base::storage::Storage; +use base::{DefaultAllocator, Unit, Vector, Vector2}; use geometry::{Isometry, Point2, Rotation, Similarity, Translation, UnitComplex}; /* @@ -404,60 +403,4 @@ where DefaultAllocator: Allocator fn div_assign(&mut self, rhs: &'b UnitComplex) { self.div_assign(rhs.to_rotation_matrix()) } -} - -// Matrix = UnitComplex * Matrix -impl UnitComplex { - /// Performs the multiplication `rhs = self * rhs` in-place. - pub fn rotate>( - &self, - rhs: &mut Matrix, - ) where - ShapeConstraint: DimEq, - { - assert_eq!( - rhs.nrows(), - 2, - "Unit complex rotation: the input matrix must have exactly two rows." - ); - let i = self.as_ref().im; - let r = self.as_ref().re; - - for j in 0..rhs.ncols() { - unsafe { - let a = *rhs.get_unchecked((0, j)); - let b = *rhs.get_unchecked((1, j)); - - *rhs.get_unchecked_mut((0, j)) = r * a - i * b; - *rhs.get_unchecked_mut((1, j)) = i * a + r * b; - } - } - } - - /// Performs the multiplication `lhs = lhs * self` in-place. - pub fn rotate_rows>( - &self, - lhs: &mut Matrix, - ) where - ShapeConstraint: DimEq, - { - assert_eq!( - lhs.ncols(), - 2, - "Unit complex rotation: the input matrix must have exactly two columns." - ); - let i = self.as_ref().im; - let r = self.as_ref().re; - - // FIXME: can we optimize that to iterate on one column at a time ? - for j in 0..lhs.nrows() { - unsafe { - let a = *lhs.get_unchecked((j, 0)); - let b = *lhs.get_unchecked((j, 1)); - - *lhs.get_unchecked_mut((j, 0)) = r * a + i * b; - *lhs.get_unchecked_mut((j, 1)) = -i * a + r * b; - } - } - } -} +} \ No newline at end of file diff --git a/src/linalg/bidiagonal.rs b/src/linalg/bidiagonal.rs index 7de0b887..9bf3cd12 100644 --- a/src/linalg/bidiagonal.rs +++ b/src/linalg/bidiagonal.rs @@ -179,9 +179,6 @@ where DefaultAllocator: Allocator, DimMinimum> + Allocator> + Allocator, C>, - // FIXME: the following bounds are ugly. - DimMinimum: DimMin, Output = DimMinimum>, - ShapeConstraint: DimEq, U1>>, { // FIXME: optimize by calling a reallocator. (self.u(), self.d(), self.v_t()) @@ -192,19 +189,16 @@ where pub fn d(&self) -> MatrixN> where DefaultAllocator: Allocator, DimMinimum>, - // FIXME: the following bounds are ugly. 
- DimMinimum: DimMin, Output = DimMinimum>, - ShapeConstraint: DimEq, U1>>, { let (nrows, ncols) = self.uv.data.shape(); let d = nrows.min(ncols); let mut res = MatrixN::identity_generic(d, d); - res.set_diagonal(&self.diagonal.map(|e| N::from_real(e.modulus()))); + res.set_partial_diagonal(self.diagonal.iter().map(|e| N::from_real(e.modulus()))); let start = self.axis_shift(); res.slice_mut(start, (d.value() - 1, d.value() - 1)) - .set_diagonal(&self.off_diagonal.map(|e| N::from_real(e.modulus()))); + .set_partial_diagonal(self.off_diagonal.iter().map(|e| N::from_real(e.modulus()))); res } diff --git a/src/linalg/cholesky.rs b/src/linalg/cholesky.rs index d4973df1..4ffacc97 100644 --- a/src/linalg/cholesky.rs +++ b/src/linalg/cholesky.rs @@ -70,11 +70,12 @@ where DefaultAllocator: Allocator let mut col = matrix.slice_range_mut(j + 1.., j); col /= denom; - continue; } } + // The diagonal element is either zero or its square root could not + // be taken (e.g. for negative real numbers). return None; } diff --git a/src/linalg/hessenberg.rs b/src/linalg/hessenberg.rs index a73ee5b5..bde3d325 100644 --- a/src/linalg/hessenberg.rs +++ b/src/linalg/hessenberg.rs @@ -92,8 +92,7 @@ where DefaultAllocator: Allocator + Allocator + Allocator (MatrixN, MatrixN) - where ShapeConstraint: DimEq> { + pub fn unpack(self) -> (MatrixN, MatrixN) { let q = self.q(); (q, self.unpack_h()) @@ -101,13 +100,12 @@ where DefaultAllocator: Allocator + Allocator + Allocator MatrixN - where ShapeConstraint: DimEq> { + pub fn unpack_h(mut self) -> MatrixN { let dim = self.hess.nrows(); self.hess.fill_lower_triangle(N::zero(), 2); self.hess .slice_mut((1, 0), (dim - 1, dim - 1)) - .set_diagonal(&self.subdiag.map(|e| N::from_real(e.modulus()))); + .set_partial_diagonal(self.subdiag.iter().map(|e| N::from_real(e.modulus()))); self.hess } @@ -116,13 +114,12 @@ where DefaultAllocator: Allocator + Allocator + Allocator MatrixN - where ShapeConstraint: DimEq> { + pub fn h(&self) -> MatrixN { let dim = self.hess.nrows(); let mut res = self.hess.clone(); res.fill_lower_triangle(N::zero(), 2); res.slice_mut((1, 0), (dim - 1, dim - 1)) - .set_diagonal(&self.subdiag.map(|e| N::from_real(e.modulus()))); + .set_partial_diagonal(self.subdiag.iter().map(|e| N::from_real(e.modulus()))); res } diff --git a/src/linalg/qr.rs b/src/linalg/qr.rs index e48c057c..12e72463 100644 --- a/src/linalg/qr.rs +++ b/src/linalg/qr.rs @@ -79,12 +79,10 @@ where DefaultAllocator: Allocator + Allocator + Allocator MatrixMN, C> where DefaultAllocator: Allocator, C>, - // FIXME: the following bound is ugly. - DimMinimum: DimMin>, { let (nrows, ncols) = self.qr.data.shape(); let mut res = self.qr.rows_generic(0, nrows.min(ncols)).upper_triangle(); - res.set_diagonal(&self.diag.map(|e| N::from_real(e.modulus()))); + res.set_partial_diagonal(self.diag.iter().map(|e| N::from_real(e.modulus()))); res } @@ -95,13 +93,11 @@ where DefaultAllocator: Allocator + Allocator + Allocator MatrixMN, C> where DefaultAllocator: Reallocator, C>, - // FIXME: the following bound is ugly (needed by `set_diagonal`). 
- DimMinimum: DimMin>, { let (nrows, ncols) = self.qr.data.shape(); let mut res = self.qr.resize_generic(nrows.min(ncols), ncols, N::zero()); res.fill_lower_triangle(N::zero(), 1); - res.set_diagonal(&self.diag.map(|e| N::from_real(e.modulus()))); + res.set_partial_diagonal(self.diag.iter().map(|e| N::from_real(e.modulus()))); res } diff --git a/src/linalg/schur.rs b/src/linalg/schur.rs index 9c877d92..a6521f72 100644 --- a/src/linalg/schur.rs +++ b/src/linalg/schur.rs @@ -52,7 +52,6 @@ where impl Schur where D: DimSub, // For Hessenberg. - ShapeConstraint: DimEq>, // For Hessenberg. DefaultAllocator: Allocator> + Allocator> + Allocator @@ -341,7 +340,7 @@ where while n > 0 { let m = n - 1; - if t[(n, m)].modulus() <= eps * (t[(n, n)].modulus() + t[(m, m)].modulus()) { + if t[(n, m)].norm1() <= eps * (t[(n, n)].norm1() + t[(m, m)].norm1()) { t[(n, m)] = N::zero(); } else { break; @@ -360,7 +359,7 @@ where let off_diag = t[(new_start, m)]; if off_diag.is_zero() - || off_diag.modulus() <= eps * (t[(new_start, new_start)].modulus() + t[(m, m)].modulus()) + || off_diag.norm1() <= eps * (t[(new_start, new_start)].norm1() + t[(m, m)].norm1()) { t[(new_start, m)] = N::zero(); break; @@ -479,7 +478,7 @@ fn compute_2x2_basis>( // NOTE: Choose the one that yields a larger x component. // This is necessary for numerical stability of the normalization of the complex // number. - if x1.modulus() > x2.modulus() { + if x1.norm1() > x2.norm1() { Some(GivensRotation::new(x1, h10).0) } else { Some(GivensRotation::new(x2, h10).0) @@ -492,7 +491,6 @@ fn compute_2x2_basis>( impl> SquareMatrix where D: DimSub, // For Hessenberg. - ShapeConstraint: DimEq>, // For Hessenberg. DefaultAllocator: Allocator> + Allocator> + Allocator diff --git a/src/linalg/svd.rs b/src/linalg/svd.rs index 12ee0469..0b5d0509 100644 --- a/src/linalg/svd.rs +++ b/src/linalg/svd.rs @@ -316,17 +316,17 @@ where let m = n - 1; if off_diagonal[m].is_zero() - || off_diagonal[m].modulus() <= eps * (diagonal[n].modulus() + diagonal[m].modulus()) + || off_diagonal[m].norm1() <= eps * (diagonal[n].norm1() + diagonal[m].norm1()) { off_diagonal[m] = N::Real::zero(); - } else if diagonal[m].modulus() <= eps { + } else if diagonal[m].norm1() <= eps { diagonal[m] = N::Real::zero(); Self::cancel_horizontal_off_diagonal_elt(diagonal, off_diagonal, u, v_t, is_upper_diagonal, m, m + 1); if m != 0 { Self::cancel_vertical_off_diagonal_elt(diagonal, off_diagonal, u, v_t, is_upper_diagonal, m - 1); } - } else if diagonal[n].modulus() <= eps { + } else if diagonal[n].norm1() <= eps { diagonal[n] = N::Real::zero(); Self::cancel_vertical_off_diagonal_elt(diagonal, off_diagonal, u, v_t, is_upper_diagonal, m); } else { @@ -344,13 +344,13 @@ where while new_start > 0 { let m = new_start - 1; - if off_diagonal[m].modulus() <= eps * (diagonal[new_start].modulus() + diagonal[m].modulus()) + if off_diagonal[m].norm1() <= eps * (diagonal[new_start].norm1() + diagonal[m].norm1()) { off_diagonal[m] = N::Real::zero(); break; } // FIXME: write a test that enters this case. 
- else if diagonal[m].modulus() <= eps { + else if diagonal[m].norm1() <= eps { diagonal[m] = N::Real::zero(); Self::cancel_horizontal_off_diagonal_elt(diagonal, off_diagonal, u, v_t, is_upper_diagonal, m, n); diff --git a/src/linalg/symmetric_eigen.rs b/src/linalg/symmetric_eigen.rs index 87b08dc2..acdf956e 100644 --- a/src/linalg/symmetric_eigen.rs +++ b/src/linalg/symmetric_eigen.rs @@ -184,7 +184,7 @@ where DefaultAllocator: Allocator + Allocator } } - if off_diag[m].modulus() <= eps * (diag[m].modulus() + diag[n].modulus()) { + if off_diag[m].norm1() <= eps * (diag[m].norm1() + diag[n].norm1()) { end -= 1; } } else if subdim == 2 { @@ -240,7 +240,7 @@ where DefaultAllocator: Allocator + Allocator while n > 0 { let m = n - 1; - if off_diag[m].modulus() > eps * (diag[n].modulus() + diag[m].modulus()) { + if off_diag[m].norm1() > eps * (diag[n].norm1() + diag[m].norm1()) { break; } @@ -256,7 +256,7 @@ where DefaultAllocator: Allocator + Allocator let m = new_start - 1; if off_diag[m].is_zero() - || off_diag[m].modulus() <= eps * (diag[new_start].modulus() + diag[m].modulus()) + || off_diag[m].norm1() <= eps * (diag[new_start].norm1() + diag[m].norm1()) { off_diag[m] = N::Real::zero(); break; @@ -277,7 +277,7 @@ where DefaultAllocator: Allocator + Allocator let val = self.eigenvalues[i]; u_t.column_mut(i).scale_mut(val); } - u_t.conjugate_transpose_mut(); + u_t.adjoint_mut(); &self.eigenvectors * u_t } } diff --git a/src/linalg/symmetric_tridiagonal.rs b/src/linalg/symmetric_tridiagonal.rs index d652ce4c..76267554 100644 --- a/src/linalg/symmetric_tridiagonal.rs +++ b/src/linalg/symmetric_tridiagonal.rs @@ -76,9 +76,8 @@ where DefaultAllocator: Allocator + Allocator> let mut p = p.rows_range_mut(i..); p.hegemv(::convert(2.0), &m, &axis, N::zero()); - let dot = axis.dotc(&p); -// p.axpy(-dot, &axis.conjugate(), N::one()); + let dot = axis.dotc(&p); m.hegerc(-N::one(), &p, &axis, N::one()); m.hegerc(-N::one(), &axis, &p, N::one()); m.hegerc(dot * ::convert(2.0), &axis, &axis, N::one());
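
For reference, the `set_partial_diagonal` helper introduced above (and used by the bidiagonal, Hessenberg, and QR code in this patch) fills the diagonal directly from an iterator instead of building a temporary vector with `map`, which is what removes the spurious allocations named in the commit message. Below is a minimal sketch of the same semantics on a plain row-major buffer; the `Mat` type and its storage layout are illustrative assumptions, not nalgebra code.

```rust
// Illustrative sketch only (not part of the patch): mirrors the documented
// behavior of `set_partial_diagonal` on a hypothetical row-major matrix type.
struct Mat {
    nrows: usize,
    ncols: usize,
    data: Vec<f64>, // row-major storage
}

impl Mat {
    fn zeros(nrows: usize, ncols: usize) -> Self {
        Mat { nrows, ncols, data: vec![0.0; nrows * ncols] }
    }

    // Fill as many diagonal entries as the iterator yields, starting at (0, 0)
    // and never going past min(nrows, ncols). No temporary vector is allocated.
    fn set_partial_diagonal(&mut self, diag: impl Iterator<Item = f64>) {
        let min_dim = self.nrows.min(self.ncols);
        for (i, val) in diag.enumerate().take(min_dim) {
            self.data[i * self.ncols + i] = val;
        }
    }
}

fn main() {
    let mut m = Mat::zeros(3, 4);
    // Only two values are yielded, so only the first two diagonal entries change.
    m.set_partial_diagonal([1.0, 2.0].iter().cloned());
    assert_eq!(m.data[0], 1.0);
    assert_eq!(m.data[1 * m.ncols + 1], 2.0);
    assert_eq!(m.data[2 * m.ncols + 2], 0.0);
}
```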
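
The patch also reorders the in-place adjoint loop (`adjoint_mut` in `src/base/matrix.rs`) so the strictly-lower/upper pair swap happens before the diagonal entry is conjugated. A self-contained sketch of that loop, with complex entries stored as `(re, im)` pairs rather than nalgebra's `Complex` scalar, under the assumption of row-major storage:

```rust
// Illustrative sketch only: the in-place adjoint (conjugate-transpose) of a
// square matrix, following the loop structure shown in the patch.
fn adjoint_mut_square(m: &mut [(f64, f64)], dim: usize) {
    assert_eq!(m.len(), dim * dim, "matrix must be square");
    for i in 0..dim {
        for j in 0..i {
            // Swap the (i, j) and (j, i) entries, conjugating both.
            let ij = i * dim + j;
            let ji = j * dim + i;
            let conj_ij = (m[ij].0, -m[ij].1);
            let conj_ji = (m[ji].0, -m[ji].1);
            m[ij] = conj_ji;
            m[ji] = conj_ij;
        }
        // The diagonal entry stays in place and is only conjugated.
        let ii = i * dim + i;
        m[ii] = (m[ii].0, -m[ii].1);
    }
}
```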