diff --git a/nalgebra-glm/src/common.rs b/nalgebra-glm/src/common.rs
index 6ab20371..85a14cf9 100644
--- a/nalgebra-glm/src/common.rs
+++ b/nalgebra-glm/src/common.rs
@@ -366,7 +366,7 @@ pub fn mix_vec<T: Number, const D: usize>(
     y: &TVec<T, D>,
     a: &TVec<T, D>,
 ) -> TVec<T, D> {
-    x.component_mul(&(TVec::<T, D>::repeat(T::one()) - a)) + y.component_mul(&a)
+    x.component_mul(&(TVec::<T, D>::repeat(T::one()) - a)) + y.component_mul(a)
 }
 
 /// Returns `x * (1.0 - a) + y * a`, i.e., the linear blend of the scalars x and y using the scalar value a.
diff --git a/nalgebra-lapack/src/eigen.rs b/nalgebra-lapack/src/eigen.rs
index 08f16115..586c372f 100644
--- a/nalgebra-lapack/src/eigen.rs
+++ b/nalgebra-lapack/src/eigen.rs
@@ -200,17 +200,16 @@ where
             eigenvalues.push(self.eigenvalues_re[c].clone());
 
             if eigenvectors.is_some() {
-                eigenvectors.as_mut().unwrap().push(
-                    (&self.eigenvectors.as_ref())
-                        .unwrap()
-                        .column(c)
-                        .into_owned(),
-                );
+                eigenvectors
+                    .as_mut()
+                    .unwrap()
+                    .push(self.eigenvectors.as_ref().unwrap().column(c).into_owned());
             }
 
             if left_eigenvectors.is_some() {
                 left_eigenvectors.as_mut().unwrap().push(
-                    (&self.left_eigenvectors.as_ref())
+                    self.left_eigenvectors
+                        .as_ref()
                         .unwrap()
                         .column(c)
                         .into_owned(),
@@ -285,12 +284,12 @@ where
 
             for r in 0..number_of_elements_value {
                 vec[r] = Complex::<T>::new(
-                    (&self.eigenvectors.as_ref()).unwrap()[(r, c)].clone(),
-                    (&self.eigenvectors.as_ref()).unwrap()[(r, c + 1)].clone(),
+                    self.eigenvectors.as_ref().unwrap()[(r, c)].clone(),
+                    self.eigenvectors.as_ref().unwrap()[(r, c + 1)].clone(),
                 );
                 vec_conj[r] = Complex::<T>::new(
-                    (&self.eigenvectors.as_ref()).unwrap()[(r, c)].clone(),
-                    (&self.eigenvectors.as_ref()).unwrap()[(r, c + 1)].clone(),
+                    self.eigenvectors.as_ref().unwrap()[(r, c)].clone(),
+                    self.eigenvectors.as_ref().unwrap()[(r, c + 1)].clone(),
                 );
             }
 
@@ -310,12 +309,12 @@ where
 
             for r in 0..number_of_elements_value {
                 vec[r] = Complex::<T>::new(
-                    (&self.left_eigenvectors.as_ref()).unwrap()[(r, c)].clone(),
-                    (&self.left_eigenvectors.as_ref()).unwrap()[(r, c + 1)].clone(),
+                    self.left_eigenvectors.as_ref().unwrap()[(r, c)].clone(),
+                    self.left_eigenvectors.as_ref().unwrap()[(r, c + 1)].clone(),
                 );
                 vec_conj[r] = Complex::<T>::new(
-                    (&self.left_eigenvectors.as_ref()).unwrap()[(r, c)].clone(),
-                    (&self.left_eigenvectors.as_ref()).unwrap()[(r, c + 1)].clone(),
+                    self.left_eigenvectors.as_ref().unwrap()[(r, c)].clone(),
+                    self.left_eigenvectors.as_ref().unwrap()[(r, c + 1)].clone(),
                 );
             }
 
diff --git a/nalgebra-sparse/src/convert/serial.rs b/nalgebra-sparse/src/convert/serial.rs
index 50fc50e4..571e3376 100644
--- a/nalgebra-sparse/src/convert/serial.rs
+++ b/nalgebra-sparse/src/convert/serial.rs
@@ -306,7 +306,7 @@ where
             |val| sorted_vals.push(val),
             &idx_workspace[..count],
             &values_workspace[..count],
-            &Add::add,
+            Add::add,
         );
 
         let new_col_count = sorted_minor_idx.len() - sorted_ja_current_len;
diff --git a/nalgebra-sparse/src/cs.rs b/nalgebra-sparse/src/cs.rs
index e000e2de..674c43c0 100644
--- a/nalgebra-sparse/src/cs.rs
+++ b/nalgebra-sparse/src/cs.rs
@@ -653,9 +653,9 @@ where
         if !monotonic && sort {
             let range_size = range_end - range_start;
             minor_index_permutation.resize(range_size, 0);
-            compute_sort_permutation(&mut minor_index_permutation, &minor_idx_in_lane);
+            compute_sort_permutation(&mut minor_index_permutation, minor_idx_in_lane);
             minor_idx_buffer.clear();
-            minor_idx_buffer.extend_from_slice(&minor_idx_in_lane);
+            minor_idx_buffer.extend_from_slice(minor_idx_in_lane);
             apply_permutation(
                 &mut minor_indices[range_start..range_end],
                 &minor_idx_buffer,
diff --git a/nalgebra-sparse/src/ops/mod.rs b/nalgebra-sparse/src/ops/mod.rs
index 9a73148c..5fa72b2a 100644
--- a/nalgebra-sparse/src/ops/mod.rs
+++ b/nalgebra-sparse/src/ops/mod.rs
@@ -149,8 +149,8 @@ impl<T> Op<T> {
     #[must_use]
    pub fn as_ref(&self) -> Op<&T> {
         match self {
-            Op::NoOp(obj) => Op::NoOp(&obj),
-            Op::Transpose(obj) => Op::Transpose(&obj),
+            Op::NoOp(obj) => Op::NoOp(obj),
+            Op::Transpose(obj) => Op::Transpose(obj),
         }
     }
 
diff --git a/nalgebra-sparse/src/ops/serial/csc.rs b/nalgebra-sparse/src/ops/serial/csc.rs
index a18cca3c..85e02eb4 100644
--- a/nalgebra-sparse/src/ops/serial/csc.rs
+++ b/nalgebra-sparse/src/ops/serial/csc.rs
@@ -88,7 +88,7 @@ where
     use Op::NoOp;
 
     match (&a, &b) {
-        (NoOp(ref a), NoOp(ref b)) => {
+        (NoOp(a), NoOp(b)) => {
             // Note: We have to reverse the order for CSC matrices
             spmm_cs_prealloc(beta, &mut c.cs, alpha, &b.cs, &a.cs)
         }
@@ -116,7 +116,7 @@ where
     use Op::NoOp;
 
     match (&a, &b) {
-        (NoOp(ref a), NoOp(ref b)) => {
+        (NoOp(a), NoOp(b)) => {
             // Note: We have to reverse the order for CSC matrices
             spmm_cs_prealloc_unchecked(beta, &mut c.cs, alpha, &b.cs, &a.cs)
         }
@@ -152,9 +152,9 @@ where
         use Cow::*;
         match (&a, &b) {
             (NoOp(_), NoOp(_)) => unreachable!(),
-            (Transpose(ref a), NoOp(_)) => (Owned(a.transpose()), Borrowed(b_ref)),
-            (NoOp(_), Transpose(ref b)) => (Borrowed(a_ref), Owned(b.transpose())),
-            (Transpose(ref a), Transpose(ref b)) => (Owned(a.transpose()), Owned(b.transpose())),
+            (Transpose(a), NoOp(_)) => (Owned(a.transpose()), Borrowed(b_ref)),
+            (NoOp(_), Transpose(b)) => (Borrowed(a_ref), Owned(b.transpose())),
+            (Transpose(a), Transpose(b)) => (Owned(a.transpose()), Owned(b.transpose())),
         }
     };
     spmm_kernel(beta, c, alpha, NoOp(a.as_ref()), NoOp(b.as_ref()))
diff --git a/nalgebra-sparse/src/ops/serial/csr.rs b/nalgebra-sparse/src/ops/serial/csr.rs
index 6384f26d..366f30aa 100644
--- a/nalgebra-sparse/src/ops/serial/csr.rs
+++ b/nalgebra-sparse/src/ops/serial/csr.rs
@@ -82,7 +82,7 @@ where
     use Op::NoOp;
 
     match (&a, &b) {
-        (NoOp(ref a), NoOp(ref b)) => spmm_cs_prealloc(beta, &mut c.cs, alpha, &a.cs, &b.cs),
+        (NoOp(a), NoOp(b)) => spmm_cs_prealloc(beta, &mut c.cs, alpha, &a.cs, &b.cs),
         _ => spmm_csr_transposed(beta, c, alpha, a, b, spmm_csr_prealloc),
     }
 }
@@ -107,9 +107,7 @@ where
     use Op::NoOp;
 
     match (&a, &b) {
-        (NoOp(ref a), NoOp(ref b)) => {
-            spmm_cs_prealloc_unchecked(beta, &mut c.cs, alpha, &a.cs, &b.cs)
-        }
+        (NoOp(a), NoOp(b)) => spmm_cs_prealloc_unchecked(beta, &mut c.cs, alpha, &a.cs, &b.cs),
         _ => spmm_csr_transposed(beta, c, alpha, a, b, spmm_csr_prealloc_unchecked),
     }
 }
@@ -142,9 +140,9 @@ where
         use Cow::*;
         match (&a, &b) {
             (NoOp(_), NoOp(_)) => unreachable!(),
-            (Transpose(ref a), NoOp(_)) => (Owned(a.transpose()), Borrowed(b_ref)),
-            (NoOp(_), Transpose(ref b)) => (Borrowed(a_ref), Owned(b.transpose())),
-            (Transpose(ref a), Transpose(ref b)) => (Owned(a.transpose()), Owned(b.transpose())),
+            (Transpose(a), NoOp(_)) => (Owned(a.transpose()), Borrowed(b_ref)),
+            (NoOp(_), Transpose(b)) => (Borrowed(a_ref), Owned(b.transpose())),
+            (Transpose(a), Transpose(b)) => (Owned(a.transpose()), Owned(b.transpose())),
         }
     };
     spmm_kernel(beta, c, alpha, NoOp(a.as_ref()), NoOp(b.as_ref()))
diff --git a/src/base/edition.rs b/src/base/edition.rs
index 8994eed7..b5c31819 100644
--- a/src/base/edition.rs
+++ b/src/base/edition.rs
@@ -598,7 +598,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
         if nremove.value() != 0 {
             unsafe {
                 compress_rows(
-                    &mut m.as_mut_slice(),
+                    m.as_mut_slice(),
                     nrows.value(),
                     ncols.value(),
                     i,
@@ -796,7 +796,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
 
         if ninsert.value() != 0 {
             extend_rows(
-                &mut res.as_mut_slice(),
+                res.as_mut_slice(),
                 nrows.value(),
                 ncols.value(),
                 i,
@@ -909,7 +909,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
         unsafe {
             if new_nrows.value() < nrows {
                 compress_rows(
-                    &mut data.as_mut_slice(),
+                    data.as_mut_slice(),
                     nrows,
                     ncols,
                     new_nrows.value(),
@@ -923,7 +923,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
                     new_nrows, new_ncols, data.data,
                 ));
                 extend_rows(
-                    &mut res.as_mut_slice(),
+                    res.as_mut_slice(),
                     nrows,
                     new_ncols.value(),
                     nrows,
diff --git a/src/geometry/scale_ops.rs b/src/geometry/scale_ops.rs
index c056a301..dc273fc8 100644
--- a/src/geometry/scale_ops.rs
+++ b/src/geometry/scale_ops.rs
@@ -83,7 +83,7 @@ add_sub_impl!(Mul, mul, ClosedMul;
     (Const<D>, U1), (Const<D>, U1) -> (Const<D>, U1)
     const D; for; where;
     self: &'a Scale<T, D>, right: &'b SVector<T, D>, Output = SVector<T, D>;
-    SVector::from(self.vector.component_mul(&right));
+    SVector::from(self.vector.component_mul(right));
     'a, 'b);
 
 add_sub_impl!(Mul, mul, ClosedMul;
@@ -97,7 +97,7 @@ add_sub_impl!(Mul, mul, ClosedMul;
     (Const<D>, U1), (Const<D>, U1) -> (Const<D>, U1)
     const D; for; where;
     self: Scale<T, D>, right: &'b SVector<T, D>, Output = SVector<T, D>;
-    SVector::from(self.vector.component_mul(&right));
+    SVector::from(self.vector.component_mul(right));
     'b);
 
 add_sub_impl!(Mul, mul, ClosedMul;
diff --git a/src/linalg/householder.rs b/src/linalg/householder.rs
index 79d7c768..08212c67 100644
--- a/src/linalg/householder.rs
+++ b/src/linalg/householder.rs
@@ -64,8 +64,8 @@ where
     if not_zero {
         let refl = Reflection::new(Unit::new_unchecked(axis), T::zero());
         let sign = reflection_norm.clone().signum();
-        if let Some(mut work) = bilateral {
-            refl.reflect_rows_with_sign(&mut right, &mut work, sign.clone());
+        if let Some(work) = bilateral {
+            refl.reflect_rows_with_sign(&mut right, work, sign.clone());
         }
         refl.reflect_with_sign(&mut right.rows_range_mut(icol + shift..), sign.conjugate());
     }
diff --git a/src/linalg/svd3.rs b/src/linalg/svd3.rs
index b36e0889..a8c39d28 100644
--- a/src/linalg/svd3.rs
+++ b/src/linalg/svd3.rs
@@ -13,7 +13,7 @@ pub fn svd_ordered3<T: RealField>(
     eps: T,
     niter: usize,
 ) -> Option<SVD<T, U3, U3>> {
-    let s = m.tr_mul(&m);
+    let s = m.tr_mul(m);
     let mut v = s.try_symmetric_eigen(eps, niter)?.eigenvectors;
     let mut b = m * &v;