From 589aebbf857c4bb2cb7e0a7ee95dd7b466e09897 Mon Sep 17 00:00:00 2001 From: Christian Authmann <8371862+cauthmann@users.noreply.github.com> Date: Wed, 5 Jun 2019 23:04:04 +0200 Subject: [PATCH 01/67] Add #[must_use] to all functions with a _mut variant (#598) --- src/base/cg.rs | 6 ++++++ src/base/matrix.rs | 5 +++++ src/base/matrix_alga.rs | 3 +++ src/base/norm.rs | 2 ++ src/base/ops.rs | 1 + src/geometry/isometry.rs | 1 + src/geometry/isometry_alga.rs | 1 + src/geometry/quaternion.rs | 5 +++++ src/geometry/rotation.rs | 2 ++ src/geometry/rotation_alga.rs | 1 + src/geometry/similarity.rs | 3 +++ src/geometry/similarity_alga.rs | 1 + src/geometry/transform.rs | 2 ++ src/geometry/transform_alga.rs | 1 + src/geometry/translation.rs | 1 + src/geometry/translation_alga.rs | 1 + src/geometry/unit_complex.rs | 2 ++ src/geometry/unit_complex_alga.rs | 1 + src/linalg/inverse.rs | 1 + 19 files changed, 40 insertions(+) diff --git a/src/base/cg.rs b/src/base/cg.rs index d23d5cd3..2c0b1483 100644 --- a/src/base/cg.rs +++ b/src/base/cg.rs @@ -156,6 +156,7 @@ impl Matrix4 { impl> SquareMatrix { /// Computes the transformation equal to `self` followed by an uniform scaling factor. #[inline] + #[must_use = "Did you mean to use append_scaling_mut()?"] pub fn append_scaling(&self, scaling: N) -> MatrixN where D: DimNameSub, @@ -168,6 +169,7 @@ impl> SquareMatrix { /// Computes the transformation equal to an uniform scaling factor followed by `self`. #[inline] + #[must_use = "Did you mean to use prepend_scaling_mut()?"] pub fn prepend_scaling(&self, scaling: N) -> MatrixN where D: DimNameSub, @@ -180,6 +182,7 @@ impl> SquareMatrix { /// Computes the transformation equal to `self` followed by a non-uniform scaling factor. 
#[inline] + #[must_use = "Did you mean to use append_nonuniform_scaling_mut()?"] pub fn append_nonuniform_scaling( &self, scaling: &Vector, SB>, @@ -196,6 +199,7 @@ impl> SquareMatrix { /// Computes the transformation equal to a non-uniform scaling factor followed by `self`. #[inline] + #[must_use = "Did you mean to use prepend_nonuniform_scaling_mut()?"] pub fn prepend_nonuniform_scaling( &self, scaling: &Vector, SB>, @@ -212,6 +216,7 @@ impl> SquareMatrix { /// Computes the transformation equal to `self` followed by a translation. #[inline] + #[must_use = "Did you mean to use append_translation_mut()?"] pub fn append_translation(&self, shift: &Vector, SB>) -> MatrixN where D: DimNameSub, @@ -225,6 +230,7 @@ impl> SquareMatrix { /// Computes the transformation equal to a translation followed by `self`. #[inline] + #[must_use = "Did you mean to use prepend_translation_mut()?"] pub fn prepend_translation( &self, shift: &Vector, SB>, diff --git a/src/base/matrix.rs b/src/base/matrix.rs index 7119a8ad..be4a2c8b 100644 --- a/src/base/matrix.rs +++ b/src/base/matrix.rs @@ -610,6 +610,7 @@ impl> Matrix { /// Transposes `self`. #[inline] + #[must_use = "Did you mean to use transpose_mut()?"] pub fn transpose(&self) -> MatrixMN where DefaultAllocator: Allocator { let (nrows, ncols) = self.data.shape(); @@ -941,6 +942,7 @@ impl> Matrix { /// The adjoint (aka. conjugate-transpose) of `self`. #[inline] + #[must_use = "Did you mean to use adjoint_mut()?"] pub fn adjoint(&self) -> MatrixMN where DefaultAllocator: Allocator { let (nrows, ncols) = self.data.shape(); @@ -976,6 +978,7 @@ impl> Matrix { /// The conjugate of `self`. #[inline] + #[must_use = "Did you mean to use conjugate_mut()?"] pub fn conjugate(&self) -> MatrixMN where DefaultAllocator: Allocator { self.map(|e| e.conjugate()) @@ -983,6 +986,7 @@ impl> Matrix { /// Divides each component of the complex matrix `self` by the given real. 
#[inline] + #[must_use = "Did you mean to use unscale_mut()?"] pub fn unscale(&self, real: N::RealField) -> MatrixMN where DefaultAllocator: Allocator { self.map(|e| e.unscale(real)) @@ -990,6 +994,7 @@ impl> Matrix { /// Multiplies each component of the complex matrix `self` by the given real. #[inline] + #[must_use = "Did you mean to use scale_mut()?"] pub fn scale(&self, real: N::RealField) -> MatrixMN where DefaultAllocator: Allocator { self.map(|e| e.scale(real)) diff --git a/src/base/matrix_alga.rs b/src/base/matrix_alga.rs index ac6aced7..76a9d7ce 100644 --- a/src/base/matrix_alga.rs +++ b/src/base/matrix_alga.rs @@ -51,6 +51,7 @@ where DefaultAllocator: Allocator, { #[inline] + #[must_use = "Did you mean to use two_sided_inverse_mut()?"] fn two_sided_inverse(&self) -> Self { -self } @@ -162,6 +163,7 @@ where DefaultAllocator: Allocator } #[inline] + #[must_use = "Did you mean to use normalize_mut()?"] fn normalize(&self) -> Self { self.normalize() } @@ -172,6 +174,7 @@ where DefaultAllocator: Allocator } #[inline] + #[must_use = "Did you mean to use try_normalize_mut()?"] fn try_normalize(&self, min_norm: N::RealField) -> Option { self.try_normalize(min_norm) } diff --git a/src/base/norm.rs b/src/base/norm.rs index 93319ddc..d05ae9b7 100644 --- a/src/base/norm.rs +++ b/src/base/norm.rs @@ -187,6 +187,7 @@ impl> Matrix { /// Returns a normalized version of this matrix. #[inline] + #[must_use = "Did you mean to use normalize_mut()?"] pub fn normalize(&self) -> MatrixMN where DefaultAllocator: Allocator { self.unscale(self.norm()) @@ -194,6 +195,7 @@ impl> Matrix { /// Returns a normalized version of this matrix unless its norm as smaller or equal to `eps`. 
#[inline] + #[must_use = "Did you mean to use try_normalize_mut()?"] pub fn try_normalize(&self, min_norm: N::RealField) -> Option> where DefaultAllocator: Allocator { let n = self.norm(); diff --git a/src/base/ops.rs b/src/base/ops.rs index cf921c33..5639cb0d 100644 --- a/src/base/ops.rs +++ b/src/base/ops.rs @@ -829,6 +829,7 @@ where impl> Matrix { /// Adds a scalar to `self`. #[inline] + #[must_use = "Did you mean to use add_scalar_mut()?"] pub fn add_scalar(&self, rhs: N) -> MatrixMN where DefaultAllocator: Allocator { let mut res = self.clone_owned(); diff --git a/src/geometry/isometry.rs b/src/geometry/isometry.rs index 888a8307..b68a7777 100755 --- a/src/geometry/isometry.rs +++ b/src/geometry/isometry.rs @@ -144,6 +144,7 @@ where DefaultAllocator: Allocator /// assert_eq!(inv * (iso * pt), pt); /// ``` #[inline] + #[must_use = "Did you mean to use inverse_mut()?"] pub fn inverse(&self) -> Self { let mut res = self.clone(); res.inverse_mut(); diff --git a/src/geometry/isometry_alga.rs b/src/geometry/isometry_alga.rs index e68b269c..08916775 100755 --- a/src/geometry/isometry_alga.rs +++ b/src/geometry/isometry_alga.rs @@ -36,6 +36,7 @@ where DefaultAllocator: Allocator, { #[inline] + #[must_use = "Did you mean to use two_sided_inverse_mut()?"] fn two_sided_inverse(&self) -> Self { self.inverse() } diff --git a/src/geometry/quaternion.rs b/src/geometry/quaternion.rs index 40c085f2..b1ca19b9 100755 --- a/src/geometry/quaternion.rs +++ b/src/geometry/quaternion.rs @@ -120,6 +120,7 @@ impl Quaternion { /// relative_eq!(q_normalized.norm(), 1.0); /// ``` #[inline] + #[must_use = "Did you mean to use normalize_mut()?"] pub fn normalize(&self) -> Self { Self::from(self.coords.normalize()) } @@ -140,6 +141,7 @@ impl Quaternion { /// assert!(conj.i == -2.0 && conj.j == -3.0 && conj.k == -4.0 && conj.w == 1.0); /// ``` #[inline] + #[must_use = "Did you mean to use conjugate_mut()?"] pub fn conjugate(&self) -> Self { Self::from_parts(self.w, -self.imag()) } @@ -163,6 
+165,7 @@ impl Quaternion { /// assert!(inv_q.is_none()); /// ``` #[inline] + #[must_use = "Did you mean to use try_inverse_mut()?"] pub fn try_inverse(&self) -> Option { let mut res = Self::from(self.coords.clone_owned()); @@ -974,6 +977,7 @@ impl UnitQuaternion { /// assert_eq!(conj, UnitQuaternion::from_axis_angle(&-axis, 1.78)); /// ``` #[inline] + #[must_use = "Did you mean to use conjugate_mut()?"] pub fn conjugate(&self) -> Self { Self::new_unchecked(self.as_ref().conjugate()) } @@ -990,6 +994,7 @@ impl UnitQuaternion { /// assert_eq!(inv * rot, UnitQuaternion::identity()); /// ``` #[inline] + #[must_use = "Did you mean to use inverse_mut()?"] pub fn inverse(&self) -> Self { self.conjugate() } diff --git a/src/geometry/rotation.rs b/src/geometry/rotation.rs index ec9c8150..ca86dfa2 100755 --- a/src/geometry/rotation.rs +++ b/src/geometry/rotation.rs @@ -270,6 +270,7 @@ where DefaultAllocator: Allocator /// assert_relative_eq!(tr_rot * rot, Rotation2::identity(), epsilon = 1.0e-6); /// ``` #[inline] + #[must_use = "Did you mean to use transpose_mut()?"] pub fn transpose(&self) -> Self { Self::from_matrix_unchecked(self.matrix.transpose()) } @@ -293,6 +294,7 @@ where DefaultAllocator: Allocator /// assert_relative_eq!(inv * rot, Rotation2::identity(), epsilon = 1.0e-6); /// ``` #[inline] + #[must_use = "Did you mean to use inverse_mut()?"] pub fn inverse(&self) -> Self { self.transpose() } diff --git a/src/geometry/rotation_alga.rs b/src/geometry/rotation_alga.rs index e8cf74e7..c4133e41 100755 --- a/src/geometry/rotation_alga.rs +++ b/src/geometry/rotation_alga.rs @@ -31,6 +31,7 @@ impl TwoSidedInverse for Rotation { #[inline] + #[must_use = "Did you mean to use two_sided_inverse_mut()?"] fn two_sided_inverse(&self) -> Self { self.transpose() } diff --git a/src/geometry/similarity.rs b/src/geometry/similarity.rs index fed04725..9f5fee41 100755 --- a/src/geometry/similarity.rs +++ b/src/geometry/similarity.rs @@ -133,6 +133,7 @@ where /// Inverts `self`. 
#[inline] + #[must_use = "Did you mean to use inverse_mut()?"] pub fn inverse(&self) -> Self { let mut res = self.clone(); res.inverse_mut(); @@ -166,6 +167,7 @@ where /// The similarity transformation that applies a scaling factor `scaling` before `self`. #[inline] + #[must_use = "Did you mean to use prepend_scaling_mut()?"] pub fn prepend_scaling(&self, scaling: N) -> Self { assert!( !relative_eq!(scaling, N::zero()), @@ -177,6 +179,7 @@ where /// The similarity transformation that applies a scaling factor `scaling` after `self`. #[inline] + #[must_use = "Did you mean to use append_scaling_mut()?"] pub fn append_scaling(&self, scaling: N) -> Self { assert!( !relative_eq!(scaling, N::zero()), diff --git a/src/geometry/similarity_alga.rs b/src/geometry/similarity_alga.rs index 448fb133..c3df94c1 100755 --- a/src/geometry/similarity_alga.rs +++ b/src/geometry/similarity_alga.rs @@ -33,6 +33,7 @@ where DefaultAllocator: Allocator, { #[inline] + #[must_use = "Did you mean to use two_sided_inverse_mut()?"] fn two_sided_inverse(&self) -> Self { self.inverse() } diff --git a/src/geometry/transform.rs b/src/geometry/transform.rs index baf3308b..a8570c9d 100755 --- a/src/geometry/transform.rs +++ b/src/geometry/transform.rs @@ -370,6 +370,7 @@ where DefaultAllocator: Allocator, DimNameSum> /// assert!(t.try_inverse().is_none()); /// ``` #[inline] + #[must_use = "Did you mean to use try_inverse_mut()?"] pub fn try_inverse(self) -> Option> { if let Some(m) = self.matrix.try_inverse() { Some(Transform::from_matrix_unchecked(m)) @@ -395,6 +396,7 @@ where DefaultAllocator: Allocator, DimNameSum> /// assert_relative_eq!(inv_t * proj, Projective2::identity()); /// ``` #[inline] + #[must_use = "Did you mean to use inverse_mut()?"] pub fn inverse(self) -> Transform where C: SubTCategoryOf { // FIXME: specialize for TAffine? 
diff --git a/src/geometry/transform_alga.rs b/src/geometry/transform_alga.rs index ec3fd7c6..65fbb32f 100755 --- a/src/geometry/transform_alga.rs +++ b/src/geometry/transform_alga.rs @@ -32,6 +32,7 @@ where DefaultAllocator: Allocator, DimNameSum>, { #[inline] + #[must_use = "Did you mean to use two_sided_inverse_mut()?"] fn two_sided_inverse(&self) -> Self { self.clone().inverse() } diff --git a/src/geometry/translation.rs b/src/geometry/translation.rs index e64b3d2e..153b1bf1 100755 --- a/src/geometry/translation.rs +++ b/src/geometry/translation.rs @@ -130,6 +130,7 @@ where DefaultAllocator: Allocator /// assert_eq!(t.inverse() * t, Translation2::identity()); /// ``` #[inline] + #[must_use = "Did you mean to use inverse_mut()?"] pub fn inverse(&self) -> Translation where N: ClosedNeg { Translation::from(-&self.vector) diff --git a/src/geometry/translation_alga.rs b/src/geometry/translation_alga.rs index 134790f6..7add6438 100755 --- a/src/geometry/translation_alga.rs +++ b/src/geometry/translation_alga.rs @@ -32,6 +32,7 @@ impl TwoSidedInverse for Translation { #[inline] + #[must_use = "Did you mean to use two_sided_inverse_mut()?"] fn two_sided_inverse(&self) -> Self { self.inverse() } diff --git a/src/geometry/unit_complex.rs b/src/geometry/unit_complex.rs index 7ba7f374..d411d82a 100755 --- a/src/geometry/unit_complex.rs +++ b/src/geometry/unit_complex.rs @@ -107,6 +107,7 @@ impl UnitComplex { /// assert_eq!(rot.complex().re, conj.complex().re); /// ``` #[inline] + #[must_use = "Did you mean to use conjugate_mut()?"] pub fn conjugate(&self) -> Self { Self::new_unchecked(self.conj()) } @@ -123,6 +124,7 @@ impl UnitComplex { /// assert_relative_eq!(inv * rot, UnitComplex::identity(), epsilon = 1.0e-6); /// ``` #[inline] + #[must_use = "Did you mean to use inverse_mut()?"] pub fn inverse(&self) -> Self { self.conjugate() } diff --git a/src/geometry/unit_complex_alga.rs b/src/geometry/unit_complex_alga.rs index 24b55233..94cfa6e1 100755 --- 
a/src/geometry/unit_complex_alga.rs +++ b/src/geometry/unit_complex_alga.rs @@ -33,6 +33,7 @@ impl AbstractMagma for UnitComplex { impl TwoSidedInverse for UnitComplex { #[inline] + #[must_use = "Did you mean to use two_sided_inverse_mut()?"] fn two_sided_inverse(&self) -> Self { self.inverse() } diff --git a/src/linalg/inverse.rs b/src/linalg/inverse.rs index 94462442..f0920cca 100644 --- a/src/linalg/inverse.rs +++ b/src/linalg/inverse.rs @@ -10,6 +10,7 @@ use crate::linalg::lu; impl> SquareMatrix { /// Attempts to invert this matrix. #[inline] + #[must_use = "Did you mean to use try_inverse_mut()?"] pub fn try_inverse(self) -> Option> where DefaultAllocator: Allocator { let mut me = self.into_owned(); From f9921a67742d4b4bcfe69bc422e75f5b8cf8ec49 Mon Sep 17 00:00:00 2001 From: Austin Lund Date: Thu, 20 Jun 2019 09:02:28 +1000 Subject: [PATCH 02/67] Refactor row_sum() and column_sum() to cover more cases. Currently the methods for row_sum and column_sum require Field and Supersetof. This means that to perform a row_sum or column_sum requires the scalar type to have more properties than just addition. Consequently, row_sum() won't work on integer matricies. This patch makes the only requirement that the scalar type be an additive monoid. Doc tests using integers are also added. --- src/base/statistics.rs | 26 ++++++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/src/base/statistics.rs b/src/base/statistics.rs index 0fe18130..a6fadda6 100644 --- a/src/base/statistics.rs +++ b/src/base/statistics.rs @@ -1,5 +1,5 @@ use crate::{Scalar, Dim, Matrix, VectorN, RowVectorN, DefaultAllocator, U1, VectorSliceN}; -use alga::general::{Field, SupersetOf}; +use alga::general::{AdditiveMonoid, Field, SupersetOf}; use crate::storage::Storage; use crate::allocator::Allocator; @@ -54,7 +54,7 @@ impl> Matrix { } } -impl, R: Dim, C: Dim, S: Storage> Matrix { +impl> Matrix { /* * * Sum computation. 
@@ -83,11 +83,15 @@ impl, R: Dim, C: Dim, S: Storage> M /// # Example /// /// ``` - /// # use nalgebra::{Matrix2x3, RowVector3}; + /// # use nalgebra::{Matrix2x3, Matrix3x2}; + /// # use nalgebra::{RowVector2, RowVector3}; /// /// let m = Matrix2x3::new(1.0, 2.0, 3.0, /// 4.0, 5.0, 6.0); /// assert_eq!(m.row_sum(), RowVector3::new(5.0, 7.0, 9.0)); + /// + /// let mint = Matrix3x2::new(1,2,3,4,5,6); + /// assert_eq!(mint.row_sum(), RowVector2::new(9,12)); /// ``` #[inline] pub fn row_sum(&self) -> RowVectorN @@ -100,11 +104,15 @@ impl, R: Dim, C: Dim, S: Storage> M /// # Example /// /// ``` - /// # use nalgebra::{Matrix2x3, Vector3}; + /// # use nalgebra::{Matrix2x3, Matrix3x2}; + /// # use nalgebra::{Vector2, Vector3}; /// /// let m = Matrix2x3::new(1.0, 2.0, 3.0, /// 4.0, 5.0, 6.0); /// assert_eq!(m.row_sum_tr(), Vector3::new(5.0, 7.0, 9.0)); + /// + /// let mint = Matrix3x2::new(1,2,3,4,5,6); + /// assert_eq!(mint.row_sum_tr(), Vector2::new(9,12)); /// ``` #[inline] pub fn row_sum_tr(&self) -> VectorN @@ -117,21 +125,27 @@ impl, R: Dim, C: Dim, S: Storage> M /// # Example /// /// ``` - /// # use nalgebra::{Matrix2x3, Vector2}; + /// # use nalgebra::{Matrix2x3, Matrix3x2}; + /// # use nalgebra::{Vector2, Vector3}; /// /// let m = Matrix2x3::new(1.0, 2.0, 3.0, /// 4.0, 5.0, 6.0); /// assert_eq!(m.column_sum(), Vector2::new(6.0, 15.0)); + /// + /// let mint = Matrix3x2::new(1,2,3,4,5,6); + /// assert_eq!(mint.column_sum(), Vector3::new(3,7,11)); /// ``` #[inline] pub fn column_sum(&self) -> VectorN where DefaultAllocator: Allocator { let nrows = self.data.shape().0; self.compress_columns(VectorN::zeros_generic(nrows, U1), |out, col| { - out.axpy(N::one(), &col, N::one()) + *out += col; }) } +} +impl, R: Dim, C: Dim, S: Storage> Matrix { /* * * Variance computation. 
From 456a5a84e7974e01aaec0ac7a25e853c65f70c4f Mon Sep 17 00:00:00 2001 From: Nestor Demeure Date: Sat, 2 Nov 2019 09:08:16 +0100 Subject: [PATCH 03/67] Used Storage trait for solve method See issue 667 : https://github.com/rustsim/nalgebra/issues/667 --- src/linalg/full_piv_lu.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/linalg/full_piv_lu.rs b/src/linalg/full_piv_lu.rs index 04f61faf..f2bfc874 100644 --- a/src/linalg/full_piv_lu.rs +++ b/src/linalg/full_piv_lu.rs @@ -167,7 +167,7 @@ where DefaultAllocator: Allocator + Allocator<(usize, usize), D> b: &Matrix, ) -> Option> where - S2: StorageMut, + S2: Storage, ShapeConstraint: SameNumberOfRows, DefaultAllocator: Allocator, { From ffb69d5a6f97393b8d8d92e94bfe30a9e89d47e6 Mon Sep 17 00:00:00 2001 From: Nestor Demeure Date: Sat, 2 Nov 2019 09:12:34 +0100 Subject: [PATCH 04/67] used Storage trait for solve method as per issue 667 https://github.com/rustsim/nalgebra/issues/667 --- src/linalg/cholesky.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/linalg/cholesky.rs b/src/linalg/cholesky.rs index 0b6e6db5..7431d666 100644 --- a/src/linalg/cholesky.rs +++ b/src/linalg/cholesky.rs @@ -129,7 +129,7 @@ where DefaultAllocator: Allocator /// `x` the unknown. 
pub fn solve(&self, b: &Matrix) -> MatrixMN where - S2: StorageMut, + S2: Storage, DefaultAllocator: Allocator, ShapeConstraint: SameNumberOfRows, { From 6f1e924e40efe1e419f80784a2d385d1a36c4c3c Mon Sep 17 00:00:00 2001 From: Nestor Demeure Date: Sat, 2 Nov 2019 09:15:35 +0100 Subject: [PATCH 05/67] used Storage trait for solve see issue 667 --- src/linalg/qr.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/linalg/qr.rs b/src/linalg/qr.rs index 683c11b8..74b4de85 100644 --- a/src/linalg/qr.rs +++ b/src/linalg/qr.rs @@ -170,7 +170,7 @@ where DefaultAllocator: Allocator + Allocator b: &Matrix, ) -> Option> where - S2: StorageMut, + S2: Storage, ShapeConstraint: SameNumberOfRows, DefaultAllocator: Allocator, { From a05aa313da80b81c4163a979b375359e32136262 Mon Sep 17 00:00:00 2001 From: Andreas Longva Date: Mon, 4 Nov 2019 17:53:21 +0100 Subject: [PATCH 06/67] Implement From<&Matrix> for MatrixSlice --- src/base/conversion.rs | 58 ++++++++++++++++++++++++++ tests/core/conversion.rs | 89 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 147 insertions(+) diff --git a/src/base/conversion.rs b/src/base/conversion.rs index 4c5bb017..36ac43c1 100644 --- a/src/base/conversion.rs +++ b/src/base/conversion.rs @@ -21,6 +21,7 @@ use crate::base::storage::{ContiguousStorage, ContiguousStorageMut, Storage, Sto #[cfg(any(feature = "std", feature = "alloc"))] use crate::base::VecStorage; use crate::base::{DefaultAllocator, Matrix, ArrayStorage, MatrixMN, MatrixSlice, MatrixSliceMut, Scalar}; +use crate::constraint::DimEq; // FIXME: too bad this won't work allo slice conversions. 
impl SubsetOf> for MatrixMN @@ -424,3 +425,60 @@ where matrix_slice.into_owned() } } + +impl<'a, N, R, C, RSlice, CSlice, S> From<&'a Matrix> +for MatrixSlice<'a, N, RSlice, CSlice, S::RStride, S::CStride> + where + N: Scalar, + R: Dim, + C: Dim, + RSlice: Dim, + CSlice: Dim, + S: Storage, + ShapeConstraint: DimEq + DimEq +{ + fn from(m: &'a Matrix) -> Self { + let (row, col) = m.data.shape(); + let row_slice = RSlice::from_usize(row.value()); + let col_slice = CSlice::from_usize(col.value()); + m.generic_slice((0, 0), (row_slice, col_slice)) + } +} + +impl<'a, N, R, C, RSlice, CSlice, S> From<&'a mut Matrix> +for MatrixSlice<'a, N, RSlice, CSlice, S::RStride, S::CStride> + where + N: Scalar, + R: Dim, + C: Dim, + RSlice: Dim, + CSlice: Dim, + S: Storage, + ShapeConstraint: DimEq + DimEq +{ + fn from(m: &'a mut Matrix) -> Self { + let (row, col) = m.data.shape(); + let row_slice = RSlice::from_usize(row.value()); + let col_slice = CSlice::from_usize(col.value()); + m.generic_slice((0, 0), (row_slice, col_slice)) + } +} + +impl<'a, N, R, C, RSlice, CSlice, S> From<&'a mut Matrix> +for MatrixSliceMut<'a, N, RSlice, CSlice, S::RStride, S::CStride> + where + N: Scalar, + R: Dim, + C: Dim, + RSlice: Dim, + CSlice: Dim, + S: StorageMut, + ShapeConstraint: DimEq + DimEq +{ + fn from(m: &'a mut Matrix) -> Self { + let (row, col) = m.data.shape(); + let row_slice = RSlice::from_usize(row.value()); + let col_slice = CSlice::from_usize(col.value()); + m.generic_slice_mut((0, 0), (row_slice, col_slice)) + } +} \ No newline at end of file diff --git a/tests/core/conversion.rs b/tests/core/conversion.rs index f8be8588..20954a25 100644 --- a/tests/core/conversion.rs +++ b/tests/core/conversion.rs @@ -8,6 +8,8 @@ use na::{ RowVector4, RowVector5, RowVector6, Similarity3, Transform3, Translation3, UnitQuaternion, Vector1, Vector2, Vector3, Vector4, Vector5, Vector6, }; +use na::{U3, U4}; +use na::{DMatrix, MatrixSlice, MatrixSliceMut, DMatrixSlice, DMatrixSliceMut}; quickcheck!{ fn 
translation_conversion(t: Translation3, v: Vector3, p: Point3) -> bool { @@ -250,3 +252,90 @@ array_matrix_conversion!( array_matrix_conversion_6_5, Matrix6x5, (6, 5); array_matrix_conversion_6_6, Matrix6, (6, 6); ); + +#[test] +fn matrix_slice_from_matrix_ref() { + let a = Matrix3x4::new(11.0, 12.0, 13.0, 14.0, + 21.0, 22.0, 23.0, 24.0, + 31.0, 32.0, 33.0, 34.0); + + // TODO: What's a more idiomatic/better way to convert a static matrix to a dynamic one? + let d = DMatrix::from(a.get((0..a.nrows(), 0..a.ncols())).unwrap()); + + // Note: these have to be macros, and not functions, because the input type is different + // across the different tests. Moreover, the output type depends on the stride of the input, + // which is different for static and dynamic matrices. + macro_rules! dynamic_slice { ($mref:expr) => { DMatrixSlice::from($mref) } } + macro_rules! dynamic_slice_mut { ($mref:expr) => { DMatrixSliceMut::from($mref) } } + macro_rules! fixed_slice { ($mref:expr) => { MatrixSlice::<_, U3, U4, _, _>::from($mref)} }; + macro_rules! fixed_slice_mut { + ($mref:expr) => { MatrixSliceMut::<_, U3, U4, _, _>::from($mref) } + }; + + // TODO: The `into_owned()` is a result of `PartialEq` not being implemented for different + // Self and RHS. See issue #674. Once this is implemented, we can remove `into_owned` + // from the below tests. 
+ + // Construct slices from reference to a + { + assert_eq!(a, fixed_slice!(&a).into_owned()); + assert_eq!(d, dynamic_slice!(&a).into_owned()); + } + + // Construct slices from mutable reference to a + { + let mut a_clone = a.clone(); + assert_eq!(a, fixed_slice!(&mut a_clone).into_owned()); + assert_eq!(d, dynamic_slice!(&mut a_clone).into_owned()); + } + + // Construct mutable slices from mutable reference to a + { + let mut a_clone = a.clone(); + assert_eq!(a, fixed_slice_mut!(&mut a_clone).into_owned()); + assert_eq!(d, dynamic_slice_mut!(&mut a_clone).into_owned()); + } + + // Construct slices from reference to d + { + assert_eq!(a, fixed_slice!(&d).into_owned()); + assert_eq!(d, dynamic_slice!(&d).into_owned()); + } + + // Construct slices from mutable reference to d + { + let mut d_clone = a.clone(); + assert_eq!(a, fixed_slice!(&mut d_clone).into_owned()); + assert_eq!(d, dynamic_slice!(&mut d_clone).into_owned()); + } + + // Construct mutable slices from mutable reference to d + { + let mut d_clone = d.clone(); + assert_eq!(a, fixed_slice_mut!(&mut d_clone).into_owned()); + assert_eq!(d, dynamic_slice_mut!(&mut d_clone).into_owned()); + } + + // Construct slices from a slice of a + { + let mut a_slice = fixed_slice!(&a); + assert_eq!(a, fixed_slice!(&a_slice).into_owned()); + assert_eq!(a, fixed_slice!(&mut a_slice).into_owned()); + assert_eq!(d, dynamic_slice!(&a_slice).into_owned()); + assert_eq!(d, dynamic_slice!(&mut a_slice).into_owned()); + } + + // Construct slices from a slice mut of a + { + // Need a clone of a here, so that we can both have a mutable borrow and compare equality + let mut a_clone = a.clone(); + let mut a_slice = fixed_slice_mut!(&mut a_clone); + + assert_eq!(a, fixed_slice!(&a_slice).into_owned()); + assert_eq!(a, fixed_slice!(&mut a_slice).into_owned()); + assert_eq!(d, dynamic_slice!(&a_slice).into_owned()); + assert_eq!(d, dynamic_slice!(&mut a_slice).into_owned()); + assert_eq!(a, fixed_slice_mut!(&mut 
a_slice).into_owned()); + assert_eq!(d, dynamic_slice_mut!(&mut a_slice).into_owned()); + } +} From 1103d49b80ad8cd637a4cb2b9a585f357944f6f5 Mon Sep 17 00:00:00 2001 From: daingun Date: Fri, 1 Nov 2019 22:12:59 +0100 Subject: [PATCH 07/67] Correct Schur decomposition for 2x2 matrices Due to rounding and possible loss of precision the lower left element of the 2x2 matrix may be different from zero. --- src/linalg/schur.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/linalg/schur.rs b/src/linalg/schur.rs index b31be9f6..2a2bb250 100644 --- a/src/linalg/schur.rs +++ b/src/linalg/schur.rs @@ -413,6 +413,7 @@ where let inv_rot = rot.inverse(); inv_rot.rotate(&mut m); rot.rotate_rows(&mut m); + m[(1, 0)] = N::zero(); if compute_q { // XXX: we have to build the matrix manually because From 2f77d732261fcae31c0feecbd0da7c1f499946c0 Mon Sep 17 00:00:00 2001 From: Andreas Longva Date: Tue, 5 Nov 2019 17:11:39 +0100 Subject: [PATCH 08/67] Generalize From<_> for MatrixSlice(Mut) to allow different strides --- src/base/conversion.rs | 61 ++++++++++++++++++++++++++++++++++------ tests/core/conversion.rs | 10 +++---- 2 files changed, 57 insertions(+), 14 deletions(-) diff --git a/src/base/conversion.rs b/src/base/conversion.rs index 36ac43c1..7763a086 100644 --- a/src/base/conversion.rs +++ b/src/base/conversion.rs @@ -20,6 +20,7 @@ use crate::base::iter::{MatrixIter, MatrixIterMut}; use crate::base::storage::{ContiguousStorage, ContiguousStorageMut, Storage, StorageMut}; #[cfg(any(feature = "std", feature = "alloc"))] use crate::base::VecStorage; +use crate::base::{SliceStorage, SliceStorageMut}; use crate::base::{DefaultAllocator, Matrix, ArrayStorage, MatrixMN, MatrixSlice, MatrixSliceMut, Scalar}; use crate::constraint::DimEq; @@ -426,59 +427,101 @@ where } } -impl<'a, N, R, C, RSlice, CSlice, S> From<&'a Matrix> -for MatrixSlice<'a, N, RSlice, CSlice, S::RStride, S::CStride> +impl<'a, N, R, C, RSlice, CSlice, RStride, CStride, S> From<&'a Matrix> +for MatrixSlice<'a, 
N, RSlice, CSlice, RStride, CStride> where N: Scalar, R: Dim, C: Dim, RSlice: Dim, CSlice: Dim, + RStride: Dim, + CStride: Dim, S: Storage, ShapeConstraint: DimEq + DimEq + + DimEq + DimEq { fn from(m: &'a Matrix) -> Self { let (row, col) = m.data.shape(); let row_slice = RSlice::from_usize(row.value()); let col_slice = CSlice::from_usize(col.value()); - m.generic_slice((0, 0), (row_slice, col_slice)) + + let (rstride, cstride) = m.strides(); + + let rstride_slice = RStride::from_usize(rstride); + let cstride_slice = CStride::from_usize(cstride); + + unsafe { + let data = SliceStorage::from_raw_parts(m.data.ptr(), + (row_slice, col_slice), + (rstride_slice, cstride_slice)); + Matrix::from_data_statically_unchecked(data) + } } } -impl<'a, N, R, C, RSlice, CSlice, S> From<&'a mut Matrix> -for MatrixSlice<'a, N, RSlice, CSlice, S::RStride, S::CStride> +impl<'a, N, R, C, RSlice, CSlice, RStride, CStride, S> From<&'a mut Matrix> +for MatrixSlice<'a, N, RSlice, CSlice, RStride, CStride> where N: Scalar, R: Dim, C: Dim, RSlice: Dim, CSlice: Dim, + RStride: Dim, + CStride: Dim, S: Storage, ShapeConstraint: DimEq + DimEq + + DimEq + DimEq { fn from(m: &'a mut Matrix) -> Self { let (row, col) = m.data.shape(); let row_slice = RSlice::from_usize(row.value()); let col_slice = CSlice::from_usize(col.value()); - m.generic_slice((0, 0), (row_slice, col_slice)) + + let (rstride, cstride) = m.strides(); + + let rstride_slice = RStride::from_usize(rstride); + let cstride_slice = CStride::from_usize(cstride); + + unsafe { + let data = SliceStorage::from_raw_parts(m.data.ptr(), + (row_slice, col_slice), + (rstride_slice, cstride_slice)); + Matrix::from_data_statically_unchecked(data) + } } } -impl<'a, N, R, C, RSlice, CSlice, S> From<&'a mut Matrix> -for MatrixSliceMut<'a, N, RSlice, CSlice, S::RStride, S::CStride> +impl<'a, N, R, C, RSlice, CSlice, RStride, CStride, S> From<&'a mut Matrix> +for MatrixSliceMut<'a, N, RSlice, CSlice, RStride, CStride> where N: Scalar, R: Dim, C: Dim, 
RSlice: Dim, CSlice: Dim, + RStride: Dim, + CStride: Dim, S: StorageMut, ShapeConstraint: DimEq + DimEq + + DimEq + DimEq { fn from(m: &'a mut Matrix) -> Self { let (row, col) = m.data.shape(); let row_slice = RSlice::from_usize(row.value()); let col_slice = CSlice::from_usize(col.value()); - m.generic_slice_mut((0, 0), (row_slice, col_slice)) + + let (rstride, cstride) = m.strides(); + + let rstride_slice = RStride::from_usize(rstride); + let cstride_slice = CStride::from_usize(cstride); + + unsafe { + let data = SliceStorageMut::from_raw_parts(m.data.ptr_mut(), + (row_slice, col_slice), + (rstride_slice, cstride_slice)); + Matrix::from_data_statically_unchecked(data) + } } } \ No newline at end of file diff --git a/tests/core/conversion.rs b/tests/core/conversion.rs index 20954a25..783654ca 100644 --- a/tests/core/conversion.rs +++ b/tests/core/conversion.rs @@ -8,7 +8,7 @@ use na::{ RowVector4, RowVector5, RowVector6, Similarity3, Transform3, Translation3, UnitQuaternion, Vector1, Vector2, Vector3, Vector4, Vector5, Vector6, }; -use na::{U3, U4}; +use na::{U1, U3, U4}; use na::{DMatrix, MatrixSlice, MatrixSliceMut, DMatrixSlice, DMatrixSliceMut}; quickcheck!{ @@ -265,11 +265,11 @@ fn matrix_slice_from_matrix_ref() { // Note: these have to be macros, and not functions, because the input type is different // across the different tests. Moreover, the output type depends on the stride of the input, // which is different for static and dynamic matrices. - macro_rules! dynamic_slice { ($mref:expr) => { DMatrixSlice::from($mref) } } - macro_rules! dynamic_slice_mut { ($mref:expr) => { DMatrixSliceMut::from($mref) } } - macro_rules! fixed_slice { ($mref:expr) => { MatrixSlice::<_, U3, U4, _, _>::from($mref)} }; + macro_rules! dynamic_slice { ($mref:expr) => { DMatrixSlice::<_>::from($mref) } } + macro_rules! dynamic_slice_mut { ($mref:expr) => { DMatrixSliceMut::<_>::from($mref) } } + macro_rules! 
fixed_slice { ($mref:expr) => { MatrixSlice::<_, U3, U4, U1, U3>::from($mref)} }; macro_rules! fixed_slice_mut { - ($mref:expr) => { MatrixSliceMut::<_, U3, U4, _, _>::from($mref) } + ($mref:expr) => { MatrixSliceMut::<_, U3, U4, U1, U3>::from($mref) } }; // TODO: The `into_owned()` is a result of `PartialEq` not being implemented for different From 0c9451165d410dd775030c00ea2b33b8f73b3247 Mon Sep 17 00:00:00 2001 From: Nestor Demeure Date: Sat, 2 Nov 2019 14:59:07 +0100 Subject: [PATCH 09/67] first version of rank one update --- src/linalg/cholesky.rs | 58 ++++++++++++++++++++++++++++++++---------- 1 file changed, 44 insertions(+), 14 deletions(-) diff --git a/src/linalg/cholesky.rs b/src/linalg/cholesky.rs index 0b6e6db5..606434e9 100644 --- a/src/linalg/cholesky.rs +++ b/src/linalg/cholesky.rs @@ -6,28 +6,25 @@ use alga::general::ComplexField; use crate::allocator::Allocator; use crate::base::{DefaultAllocator, Matrix, MatrixMN, MatrixN, SquareMatrix}; use crate::constraint::{SameNumberOfRows, ShapeConstraint}; -use crate::dimension::{Dim, DimSub, Dynamic}; +use crate::dimension::{Dim, DimSub, Dynamic, U1}; use crate::storage::{Storage, StorageMut}; /// The Cholesky decomposition of a symmetric-definite-positive matrix. 
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] #[cfg_attr( feature = "serde-serialize", - serde(bound( - serialize = "DefaultAllocator: Allocator, - MatrixN: Serialize" - )) + serde(bound(serialize = "DefaultAllocator: Allocator, + MatrixN: Serialize")) )] #[cfg_attr( feature = "serde-serialize", - serde(bound( - deserialize = "DefaultAllocator: Allocator, - MatrixN: Deserialize<'de>" - )) + serde(bound(deserialize = "DefaultAllocator: Allocator, + MatrixN: Deserialize<'de>")) )] #[derive(Clone, Debug)] pub struct Cholesky -where DefaultAllocator: Allocator +where + DefaultAllocator: Allocator, { chol: MatrixN, } @@ -36,10 +33,12 @@ impl Copy for Cholesky where DefaultAllocator: Allocator, MatrixN: Copy, -{} +{ +} impl> Cholesky -where DefaultAllocator: Allocator +where + DefaultAllocator: Allocator, { /// Attempts to compute the Cholesky decomposition of `matrix`. /// @@ -129,7 +128,7 @@ where DefaultAllocator: Allocator /// `x` the unknown. pub fn solve(&self, b: &Matrix) -> MatrixMN where - S2: StorageMut, + S2: Storage, DefaultAllocator: Allocator, ShapeConstraint: SameNumberOfRows, { @@ -146,10 +145,41 @@ where DefaultAllocator: Allocator self.solve_mut(&mut res); res } + + /// Performs a rank one update of the current decomposition. + /// If `M = L * L^T` before the rank one update, then after it we have `L*L^T = M + sigma * v*v^T` where v must be a vector of same dimension. 
+ /// TODO rewrite comment (current version is taken verbatim from eigen) + /// TODO insures that code is correct for complex numbers, eigen uses abs2 and conj + /// https://eigen.tuxfamily.org/dox/LLT_8h_source.html + pub fn rank_one_update(&mut self, x: &Matrix, sigma: N) + where + S2: Storage, + DefaultAllocator: Allocator, + ShapeConstraint: SameNumberOfRows, + { + let n = x.nrows(); + let mut temp = x.clone_owned(); + for k in 0..n { + let lkk = self.chol[(k, k)]; // TODO unsafe { *matrix.get_unchecked((j, j)) } + let xk = temp[k]; + let r = (lkk * lkk + sigma * xk * xk).sqrt(); + let c = r / lkk; + let s = xk / lkk; + self.chol[(k, k)] = r; + // Update the terms of L + if k < n { + for k2 in (k + 1)..n { + self.chol[(k2, k)] = (self.chol[(k2, k)] + sigma * s * temp[k2]) / c; + temp[k2] = c * temp[k2] - s * self.chol[(k2, k)]; + } + } + } + } } impl, S: Storage> SquareMatrix -where DefaultAllocator: Allocator +where + DefaultAllocator: Allocator, { /// Attempts to compute the Cholesky decomposition of this matrix. /// From cc478c6c6d57183526df2dc0505cb7ccc270d768 Mon Sep 17 00:00:00 2001 From: Nestor Demeure Date: Sat, 2 Nov 2019 15:11:14 +0100 Subject: [PATCH 10/67] added test for update --- tests/linalg/cholesky.rs | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/tests/linalg/cholesky.rs b/tests/linalg/cholesky.rs index cefc2630..52b1198c 100644 --- a/tests/linalg/cholesky.rs +++ b/tests/linalg/cholesky.rs @@ -1,6 +1,5 @@ #![cfg(all(feature = "arbitrary", feature = "debug"))] - macro_rules! gen_tests( ($module: ident, $scalar: ty) => { mod $module { @@ -78,6 +77,22 @@ macro_rules! 
gen_tests( id1.is_identity(1.0e-7) && id2.is_identity(1.0e-7) } + + fn cholesky_rank_one_update(_n: usize) -> bool { + let m = RandomSDP::new(U4, || random::<$scalar>().0).unwrap(); + let x = Vector4::<$scalar>::new_random().map(|e| e.0); + let sigma : $scalar = 1.; + + // updates m manually + let m_updated = m + sigma * x * x.transpose(); + + // updates cholesky deomposition and reconstruct m + let mut chol = m.clone().cholesky().unwrap(); + chol.rank_one_update(x, sigma); + let m_chol_updated = chol.l() * chol.l().transpose(); + + relative_eq!(m_updated, m_chol_updated, epsilon = 1.0e-7) + } } } } From 5942a2a125ea9aed4b61f369c5cb50344fce49e7 Mon Sep 17 00:00:00 2001 From: Nestor Demeure Date: Sat, 2 Nov 2019 15:56:59 +0100 Subject: [PATCH 11/67] got test to compile --- src/linalg/cholesky.rs | 2 +- tests/linalg/cholesky.rs | 19 +++++++++++-------- 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/src/linalg/cholesky.rs b/src/linalg/cholesky.rs index 606434e9..6a2c9da8 100644 --- a/src/linalg/cholesky.rs +++ b/src/linalg/cholesky.rs @@ -151,7 +151,7 @@ where /// TODO rewrite comment (current version is taken verbatim from eigen) /// TODO insures that code is correct for complex numbers, eigen uses abs2 and conj /// https://eigen.tuxfamily.org/dox/LLT_8h_source.html - pub fn rank_one_update(&mut self, x: &Matrix, sigma: N) + pub fn rank_one_update(&mut self, x: &Matrix, sigma: N) where S2: Storage, DefaultAllocator: Allocator, diff --git a/tests/linalg/cholesky.rs b/tests/linalg/cholesky.rs index 52b1198c..80a54e2f 100644 --- a/tests/linalg/cholesky.rs +++ b/tests/linalg/cholesky.rs @@ -79,19 +79,22 @@ macro_rules! 
gen_tests( } fn cholesky_rank_one_update(_n: usize) -> bool { - let m = RandomSDP::new(U4, || random::<$scalar>().0).unwrap(); + let mut m = RandomSDP::new(U4, || random::<$scalar>().0).unwrap(); let x = Vector4::<$scalar>::new_random().map(|e| e.0); - let sigma : $scalar = 1.; + let sigma = random::<$scalar>().0; // random::<$scalar>().0; + let one = sigma*0. + 1.; // TODO this is dirty but $scalar appears to not be a scalar type + + // updates cholesky decomposition and reconstructs m + let mut chol = m.clone().cholesky().unwrap(); + chol.rank_one_update(&x, sigma); + let m_chol_updated = chol.l() * chol.l().adjoint(); // updates m manually - let m_updated = m + sigma * x * x.transpose(); + m.syger(sigma, &x, &x, one); // m += sigma * x * x.adjoint() - // updates cholesky deomposition and reconstruct m - let mut chol = m.clone().cholesky().unwrap(); - chol.rank_one_update(x, sigma); - let m_chol_updated = chol.l() * chol.l().transpose(); + println!("m : {:?}", m); - relative_eq!(m_updated, m_chol_updated, epsilon = 1.0e-7) + relative_eq!(m, m_chol_updated, epsilon = 1.0e-7) } } } From 45e6ac7c2a9b318eec2849813a571b441aad3206 Mon Sep 17 00:00:00 2001 From: Nestor Demeure Date: Sat, 2 Nov 2019 16:36:23 +0100 Subject: [PATCH 12/67] test is now correct --- tests/linalg/cholesky.rs | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/tests/linalg/cholesky.rs b/tests/linalg/cholesky.rs index 80a54e2f..823ec96f 100644 --- a/tests/linalg/cholesky.rs +++ b/tests/linalg/cholesky.rs @@ -79,10 +79,13 @@ macro_rules! gen_tests( } fn cholesky_rank_one_update(_n: usize) -> bool { - let mut m = RandomSDP::new(U4, || random::<$scalar>().0).unwrap(); - let x = Vector4::<$scalar>::new_random().map(|e| e.0); - let sigma = random::<$scalar>().0; // random::<$scalar>().0; - let one = sigma*0. 
+ 1.; // TODO this is dirty but $scalar appears to not be a scalar type + use nalgebra::dimension::U3; + use nalgebra::Vector3; + let mut m = RandomSDP::new(U3, || random::<$scalar>().0).unwrap(); + let x = Vector3::<$scalar>::new_random().map(|e| e.0); + let mut sigma = random::<$scalar>().0; // random::<$scalar>().0; + let one = sigma*0. + 1.; // TODO this is dirty but $scalar appears to not be a scalar type in this file + sigma = one; // TODO placeholder // updates cholesky decomposition and reconstructs m let mut chol = m.clone().cholesky().unwrap(); @@ -90,9 +93,11 @@ macro_rules! gen_tests( let m_chol_updated = chol.l() * chol.l().adjoint(); // updates m manually - m.syger(sigma, &x, &x, one); // m += sigma * x * x.adjoint() + m.ger(sigma, &x, &x, one); // m += sigma * x * x.adjoint() - println!("m : {:?}", m); + println!("sigma : {}", sigma); + println!("m updated : {}", m); + println!("chol : {}", m_chol_updated); relative_eq!(m, m_chol_updated, epsilon = 1.0e-7) } From 16154f163af986914d9d14e3677b7a7100f2f36c Mon Sep 17 00:00:00 2001 From: Nestor Demeure Date: Sat, 2 Nov 2019 16:45:30 +0100 Subject: [PATCH 13/67] added real constraint on sigma --- src/linalg/cholesky.rs | 11 +++++++++-- tests/linalg/cholesky.rs | 11 +++++++---- 2 files changed, 16 insertions(+), 6 deletions(-) diff --git a/src/linalg/cholesky.rs b/src/linalg/cholesky.rs index 6a2c9da8..cbbe5ff8 100644 --- a/src/linalg/cholesky.rs +++ b/src/linalg/cholesky.rs @@ -8,6 +8,7 @@ use crate::base::{DefaultAllocator, Matrix, MatrixMN, MatrixN, SquareMatrix}; use crate::constraint::{SameNumberOfRows, ShapeConstraint}; use crate::dimension::{Dim, DimSub, Dynamic, U1}; use crate::storage::{Storage, StorageMut}; +use crate::RealField; /// The Cholesky decomposition of a symmetric-definite-positive matrix. 
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] @@ -151,12 +152,18 @@ where /// TODO rewrite comment (current version is taken verbatim from eigen) /// TODO insures that code is correct for complex numbers, eigen uses abs2 and conj /// https://eigen.tuxfamily.org/dox/LLT_8h_source.html - pub fn rank_one_update(&mut self, x: &Matrix, sigma: N) - where + /// TODO insure that sigma is a real + pub fn rank_one_update( + &mut self, + x: &Matrix, + sigma: N2, + ) where + N: From, S2: Storage, DefaultAllocator: Allocator, ShapeConstraint: SameNumberOfRows, { + let sigma = ::from(sigma); let n = x.nrows(); let mut temp = x.clone_owned(); for k in 0..n { diff --git a/tests/linalg/cholesky.rs b/tests/linalg/cholesky.rs index 823ec96f..bef5de95 100644 --- a/tests/linalg/cholesky.rs +++ b/tests/linalg/cholesky.rs @@ -83,9 +83,12 @@ macro_rules! gen_tests( use nalgebra::Vector3; let mut m = RandomSDP::new(U3, || random::<$scalar>().0).unwrap(); let x = Vector3::<$scalar>::new_random().map(|e| e.0); - let mut sigma = random::<$scalar>().0; // random::<$scalar>().0; - let one = sigma*0. + 1.; // TODO this is dirty but $scalar appears to not be a scalar type in this file - sigma = one; // TODO placeholder + + // TODO this is dirty but $scalar appears to not be a scalar type in this file + let zero = random::<$scalar>().0 * 0.; + let one = zero + 1.; + let sigma = random::(); // needs to be a real + let sigma_scalar = zero + sigma; // updates cholesky decomposition and reconstructs m let mut chol = m.clone().cholesky().unwrap(); @@ -93,7 +96,7 @@ macro_rules! 
gen_tests( let m_chol_updated = chol.l() * chol.l().adjoint(); // updates m manually - m.ger(sigma, &x, &x, one); // m += sigma * x * x.adjoint() + m.ger(sigma_scalar, &x, &x, one); // m += sigma * x * x.adjoint() println!("sigma : {}", sigma); println!("m updated : {}", m); From 96c16af66fdc93a46cf814496e4ee0c09b5c59bb Mon Sep 17 00:00:00 2001 From: Nestor Demeure Date: Sat, 2 Nov 2019 16:49:57 +0100 Subject: [PATCH 14/67] updated comment --- src/linalg/cholesky.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/src/linalg/cholesky.rs b/src/linalg/cholesky.rs index cbbe5ff8..4063b4ee 100644 --- a/src/linalg/cholesky.rs +++ b/src/linalg/cholesky.rs @@ -147,12 +147,10 @@ where res } - /// Performs a rank one update of the current decomposition. - /// If `M = L * L^T` before the rank one update, then after it we have `L*L^T = M + sigma * v*v^T` where v must be a vector of same dimension. - /// TODO rewrite comment (current version is taken verbatim from eigen) + /// Given the Cholesky decomposition of a matrix `M`, a scalar `sigma` and a vector `v`, + /// performs a rank one update such that we end up with the decomposition of `M + sigma * v*v^*`. 
/// TODO insures that code is correct for complex numbers, eigen uses abs2 and conj /// https://eigen.tuxfamily.org/dox/LLT_8h_source.html - /// TODO insure that sigma is a real pub fn rank_one_update( &mut self, x: &Matrix, From 7347d467aef008c714f7f664c8398c8ba39d7820 Mon Sep 17 00:00:00 2001 From: Nestor Demeure Date: Sat, 2 Nov 2019 18:27:01 +0100 Subject: [PATCH 15/67] rank update passed tests --- src/linalg/cholesky.rs | 43 ++++++++++++++++++++-------------------- tests/linalg/cholesky.rs | 12 +++-------- 2 files changed, 25 insertions(+), 30 deletions(-) diff --git a/src/linalg/cholesky.rs b/src/linalg/cholesky.rs index 4063b4ee..c4049504 100644 --- a/src/linalg/cholesky.rs +++ b/src/linalg/cholesky.rs @@ -8,7 +8,6 @@ use crate::base::{DefaultAllocator, Matrix, MatrixMN, MatrixN, SquareMatrix}; use crate::constraint::{SameNumberOfRows, ShapeConstraint}; use crate::dimension::{Dim, DimSub, Dynamic, U1}; use crate::storage::{Storage, StorageMut}; -use crate::RealField; /// The Cholesky decomposition of a symmetric-definite-positive matrix. #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] @@ -149,33 +148,35 @@ where /// Given the Cholesky decomposition of a matrix `M`, a scalar `sigma` and a vector `v`, /// performs a rank one update such that we end up with the decomposition of `M + sigma * v*v^*`. 
- /// TODO insures that code is correct for complex numbers, eigen uses abs2 and conj - /// https://eigen.tuxfamily.org/dox/LLT_8h_source.html - pub fn rank_one_update( - &mut self, - x: &Matrix, - sigma: N2, - ) where - N: From, + pub fn rank_one_update(&mut self, x: &Matrix, sigma: N::RealField) + where S2: Storage, DefaultAllocator: Allocator, ShapeConstraint: SameNumberOfRows, { - let sigma = ::from(sigma); + // for a description of the operation, see https://en.wikipedia.org/wiki/Cholesky_decomposition#Updating_the_decomposition + // heavily inspired by Eigen's implementation https://eigen.tuxfamily.org/dox/LLT_8h_source.html + // TODO use unsafe { *matrix.get_unchecked((j, j)) } let n = x.nrows(); let mut temp = x.clone_owned(); - for k in 0..n { - let lkk = self.chol[(k, k)]; // TODO unsafe { *matrix.get_unchecked((j, j)) } - let xk = temp[k]; - let r = (lkk * lkk + sigma * xk * xk).sqrt(); - let c = r / lkk; - let s = xk / lkk; - self.chol[(k, k)] = r; + let mut beta = crate::one::(); + for j in 0..n { + let ljj = N::real(self.chol[(j, j)]); + let dj = ljj * ljj; + let wj = temp[j]; + let swj2 = sigma * N::modulus_squared(wj); + let gamma = dj * beta + swj2; + let nljj = (dj + swj2 / beta).sqrt(); + self.chol[(j, j)] = N::from_real(nljj); + beta += swj2 / dj; // Update the terms of L - if k < n { - for k2 in (k + 1)..n { - self.chol[(k2, k)] = (self.chol[(k2, k)] + sigma * s * temp[k2]) / c; - temp[k2] = c * temp[k2] - s * self.chol[(k2, k)]; + if j < n { + for k in (j + 1)..n { + temp[k] -= (wj / N::from_real(ljj)) * self.chol[(k, j)]; + if gamma != crate::zero::() { + self.chol[(k, j)] = N::from_real(nljj / ljj) * self.chol[(k, j)] + + (N::from_real(nljj * sigma / gamma) * N::conjugate(wj)) * temp[k]; + } } } } diff --git a/tests/linalg/cholesky.rs b/tests/linalg/cholesky.rs index bef5de95..b04ed402 100644 --- a/tests/linalg/cholesky.rs +++ b/tests/linalg/cholesky.rs @@ -79,10 +79,8 @@ macro_rules! 
gen_tests( } fn cholesky_rank_one_update(_n: usize) -> bool { - use nalgebra::dimension::U3; - use nalgebra::Vector3; - let mut m = RandomSDP::new(U3, || random::<$scalar>().0).unwrap(); - let x = Vector3::<$scalar>::new_random().map(|e| e.0); + let mut m = RandomSDP::new(U4, || random::<$scalar>().0).unwrap(); + let x = Vector4::<$scalar>::new_random().map(|e| e.0); // TODO this is dirty but $scalar appears to not be a scalar type in this file let zero = random::<$scalar>().0 * 0.; @@ -96,11 +94,7 @@ macro_rules! gen_tests( let m_chol_updated = chol.l() * chol.l().adjoint(); // updates m manually - m.ger(sigma_scalar, &x, &x, one); // m += sigma * x * x.adjoint() - - println!("sigma : {}", sigma); - println!("m updated : {}", m); - println!("chol : {}", m_chol_updated); + m.gerc(sigma_scalar, &x, &x, one); // m += sigma * x * x.adjoint() relative_eq!(m, m_chol_updated, epsilon = 1.0e-7) } From 516155025a22ae3874c286431fa16391e644867a Mon Sep 17 00:00:00 2001 From: Nestor Demeure Date: Sat, 2 Nov 2019 19:04:07 +0100 Subject: [PATCH 16/67] code cleaned --- src/linalg/cholesky.rs | 38 +++++++++++++++++++++----------------- tests/linalg/cholesky.rs | 4 ++-- 2 files changed, 23 insertions(+), 19 deletions(-) diff --git a/src/linalg/cholesky.rs b/src/linalg/cholesky.rs index c4049504..d0a9918c 100644 --- a/src/linalg/cholesky.rs +++ b/src/linalg/cholesky.rs @@ -147,7 +147,7 @@ where } /// Given the Cholesky decomposition of a matrix `M`, a scalar `sigma` and a vector `v`, - /// performs a rank one update such that we end up with the decomposition of `M + sigma * v*v^*`. + /// performs a rank one update such that we end up with the decomposition of `M + sigma * v*v.adjoint()`. 
pub fn rank_one_update(&mut self, x: &Matrix, sigma: N::RealField) where S2: Storage, @@ -156,27 +156,31 @@ where { // for a description of the operation, see https://en.wikipedia.org/wiki/Cholesky_decomposition#Updating_the_decomposition // heavily inspired by Eigen's implementation https://eigen.tuxfamily.org/dox/LLT_8h_source.html - // TODO use unsafe { *matrix.get_unchecked((j, j)) } let n = x.nrows(); - let mut temp = x.clone_owned(); + let mut x = x.clone_owned(); let mut beta = crate::one::(); for j in 0..n { - let ljj = N::real(self.chol[(j, j)]); - let dj = ljj * ljj; - let wj = temp[j]; - let swj2 = sigma * N::modulus_squared(wj); - let gamma = dj * beta + swj2; - let nljj = (dj + swj2 / beta).sqrt(); - self.chol[(j, j)] = N::from_real(nljj); - beta += swj2 / dj; + let diag = N::real(unsafe { *self.chol.get_unchecked((j, j)) }); + let diag2 = diag * diag; + let xj = unsafe { *x.get_unchecked(j) }; + let sigma_xj2 = sigma * N::modulus_squared(xj); + let gamma = diag2 * beta + sigma_xj2; + let new_diag = (diag2 + sigma_xj2 / beta).sqrt(); + unsafe { *self.chol.get_unchecked_mut((j, j)) = N::from_real(new_diag) }; + beta += sigma_xj2 / diag2; // Update the terms of L if j < n { - for k in (j + 1)..n { - temp[k] -= (wj / N::from_real(ljj)) * self.chol[(k, j)]; - if gamma != crate::zero::() { - self.chol[(k, j)] = N::from_real(nljj / ljj) * self.chol[(k, j)] - + (N::from_real(nljj * sigma / gamma) * N::conjugate(wj)) * temp[k]; - } + let mut xjplus = x.rows_range_mut(j + 1..); + let mut col_j = self.chol.slice_range_mut(j + 1.., j); + // temp_jplus -= (wj / N::from_real(diag)) * col_j; + xjplus.axpy(-xj / N::from_real(diag), &col_j, N::one()); + if gamma != crate::zero::() { + // col_j = N::from_real(nljj / diag) * col_j + (N::from_real(nljj * sigma / gamma) * N::conjugate(wj)) * temp_jplus; + col_j.axpy( + N::from_real(new_diag * sigma / gamma) * N::conjugate(xj), + &xjplus, + N::from_real(new_diag / diag), + ); } } } diff --git a/tests/linalg/cholesky.rs 
b/tests/linalg/cholesky.rs index b04ed402..ea8402a3 100644 --- a/tests/linalg/cholesky.rs +++ b/tests/linalg/cholesky.rs @@ -82,13 +82,13 @@ macro_rules! gen_tests( let mut m = RandomSDP::new(U4, || random::<$scalar>().0).unwrap(); let x = Vector4::<$scalar>::new_random().map(|e| e.0); - // TODO this is dirty but $scalar appears to not be a scalar type in this file + // this is dirty but $scalar is not a scalar type (its a Rand) in this file let zero = random::<$scalar>().0 * 0.; let one = zero + 1.; let sigma = random::(); // needs to be a real let sigma_scalar = zero + sigma; - // updates cholesky decomposition and reconstructs m + // updates cholesky decomposition and reconstructs m updated let mut chol = m.clone().cholesky().unwrap(); chol.rank_one_update(&x, sigma); let m_chol_updated = chol.l() * chol.l().adjoint(); From b2a50404b6577c5e9c4284e73b864a39e3f04338 Mon Sep 17 00:00:00 2001 From: Nestor Demeure Date: Sat, 2 Nov 2019 19:05:39 +0100 Subject: [PATCH 17/67] code cleaned --- src/linalg/cholesky.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/linalg/cholesky.rs b/src/linalg/cholesky.rs index d0a9918c..0f453975 100644 --- a/src/linalg/cholesky.rs +++ b/src/linalg/cholesky.rs @@ -154,8 +154,7 @@ where DefaultAllocator: Allocator, ShapeConstraint: SameNumberOfRows, { - // for a description of the operation, see https://en.wikipedia.org/wiki/Cholesky_decomposition#Updating_the_decomposition - // heavily inspired by Eigen's implementation https://eigen.tuxfamily.org/dox/LLT_8h_source.html + // heavily inspired by Eigen's `llt_rank_update_lower` implementation https://eigen.tuxfamily.org/dox/LLT_8h_source.html let n = x.nrows(); let mut x = x.clone_owned(); let mut beta = crate::one::(); From 3d170e1684237652e62adfb6dac0f9d6b1119652 Mon Sep 17 00:00:00 2001 From: Nestor Demeure Date: Sat, 2 Nov 2019 19:28:46 +0100 Subject: [PATCH 18/67] removed useless if --- src/linalg/cholesky.rs | 27 +++++++++++++-------------- 1 file changed, 13 
insertions(+), 14 deletions(-) diff --git a/src/linalg/cholesky.rs b/src/linalg/cholesky.rs index 0f453975..25e632c5 100644 --- a/src/linalg/cholesky.rs +++ b/src/linalg/cholesky.rs @@ -159,6 +159,7 @@ where let mut x = x.clone_owned(); let mut beta = crate::one::(); for j in 0..n { + // updates the diagonal let diag = N::real(unsafe { *self.chol.get_unchecked((j, j)) }); let diag2 = diag * diag; let xj = unsafe { *x.get_unchecked(j) }; @@ -167,20 +168,18 @@ where let new_diag = (diag2 + sigma_xj2 / beta).sqrt(); unsafe { *self.chol.get_unchecked_mut((j, j)) = N::from_real(new_diag) }; beta += sigma_xj2 / diag2; - // Update the terms of L - if j < n { - let mut xjplus = x.rows_range_mut(j + 1..); - let mut col_j = self.chol.slice_range_mut(j + 1.., j); - // temp_jplus -= (wj / N::from_real(diag)) * col_j; - xjplus.axpy(-xj / N::from_real(diag), &col_j, N::one()); - if gamma != crate::zero::() { - // col_j = N::from_real(nljj / diag) * col_j + (N::from_real(nljj * sigma / gamma) * N::conjugate(wj)) * temp_jplus; - col_j.axpy( - N::from_real(new_diag * sigma / gamma) * N::conjugate(xj), - &xjplus, - N::from_real(new_diag / diag), - ); - } + // updates the terms of L + let mut xjplus = x.rows_range_mut(j + 1..); + let mut col_j = self.chol.slice_range_mut(j + 1.., j); + // temp_jplus -= (wj / N::from_real(diag)) * col_j; + xjplus.axpy(-xj / N::from_real(diag), &col_j, N::one()); + if gamma != crate::zero::() { + // col_j = N::from_real(nljj / diag) * col_j + (N::from_real(nljj * sigma / gamma) * N::conjugate(wj)) * temp_jplus; + col_j.axpy( + N::from_real(new_diag * sigma / gamma) * N::conjugate(xj), + &xjplus, + N::from_real(new_diag / diag), + ); } } } From 2f18aee212a8f379e43700735a17e43f36a0302a Mon Sep 17 00:00:00 2001 From: Nestor Demeure Date: Sun, 3 Nov 2019 09:36:03 +0100 Subject: [PATCH 19/67] added assertion --- src/linalg/cholesky.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/linalg/cholesky.rs b/src/linalg/cholesky.rs index 
25e632c5..0362f809 100644 --- a/src/linalg/cholesky.rs +++ b/src/linalg/cholesky.rs @@ -156,6 +156,11 @@ where { // heavily inspired by Eigen's `llt_rank_update_lower` implementation https://eigen.tuxfamily.org/dox/LLT_8h_source.html let n = x.nrows(); + assert_eq!( + n, + self.chol.nrows(), + "The input vector must be of the same size as the factorized matrix." + ); let mut x = x.clone_owned(); let mut beta = crate::one::(); for j in 0..n { From e583e37d1cc0537223a7973918be871b13d84437 Mon Sep 17 00:00:00 2001 From: Nestor Demeure Date: Sun, 3 Nov 2019 13:20:56 +0100 Subject: [PATCH 20/67] finally got the correct type for insert column --- src/linalg/cholesky.rs | 31 ++++++++++++++++++++++++++++++- 1 file changed, 30 insertions(+), 1 deletion(-) diff --git a/src/linalg/cholesky.rs b/src/linalg/cholesky.rs index 0362f809..7623e247 100644 --- a/src/linalg/cholesky.rs +++ b/src/linalg/cholesky.rs @@ -6,8 +6,9 @@ use alga::general::ComplexField; use crate::allocator::Allocator; use crate::base::{DefaultAllocator, Matrix, MatrixMN, MatrixN, SquareMatrix}; use crate::constraint::{SameNumberOfRows, ShapeConstraint}; -use crate::dimension::{Dim, DimSub, Dynamic, U1}; +use crate::dimension::{Dim, DimAdd, DimSum, DimSub, Dynamic, U1}; use crate::storage::{Storage, StorageMut}; +use crate::base::allocator::Reallocator; /// The Cholesky decomposition of a symmetric-definite-positive matrix. #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] @@ -188,6 +189,34 @@ where } } } + + /// Updates the decomposition such that we get the decomposition of a matrix with the given column `c` in the `j`th position. + /// Since the matrix is square, an identical row will be added in the `j`th row. 
+ pub fn insert_column( + self, + j: usize, + c: &Matrix, + ) -> Cholesky> + where + D: DimAdd, + DefaultAllocator: Reallocator> + Reallocator, DimSum, DimSum>, + S2: Storage, + ShapeConstraint: SameNumberOfRows>, + { + let n = c.nrows(); + assert_eq!( + n, + self.chol.nrows() + 1, + "The new column must have the size of the factored matrix plus one." + ); + assert!(j < n, "j needs to be within the bound of the new matrix."); + // TODO what is the fastest way to produce the new matrix ? + let chol= self.chol.insert_column(j, N::zero()).insert_row(j, N::zero()); + + // TODO see https://en.wikipedia.org/wiki/Cholesky_decomposition#Updating_the_decomposition + unimplemented!(); + Cholesky { chol } + } } impl, S: Storage> SquareMatrix From 498c6ef60b2a526cf719dc3a84f130edf8270ca5 Mon Sep 17 00:00:00 2001 From: Nestor Demeure Date: Sun, 3 Nov 2019 13:26:18 +0100 Subject: [PATCH 21/67] added template for remove_column --- src/linalg/cholesky.rs | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/src/linalg/cholesky.rs b/src/linalg/cholesky.rs index 7623e247..f63ab826 100644 --- a/src/linalg/cholesky.rs +++ b/src/linalg/cholesky.rs @@ -6,7 +6,7 @@ use alga::general::ComplexField; use crate::allocator::Allocator; use crate::base::{DefaultAllocator, Matrix, MatrixMN, MatrixN, SquareMatrix}; use crate::constraint::{SameNumberOfRows, ShapeConstraint}; -use crate::dimension::{Dim, DimAdd, DimSum, DimSub, Dynamic, U1}; +use crate::dimension::{Dim, DimAdd, DimSum, DimDiff, DimSub, Dynamic, U1}; use crate::storage::{Storage, StorageMut}; use crate::base::allocator::Reallocator; @@ -217,6 +217,26 @@ where unimplemented!(); Cholesky { chol } } + + /// Updates the decomposition such that we get the decomposition of the factored matrix with its `j`th column removed. + /// Since the matrix is square, the `j`th row will also be removed. 
+ pub fn remove_column( + self, + j: usize, + ) -> Cholesky> + where + D: DimSub, + DefaultAllocator: Reallocator> + Reallocator, DimDiff, DimDiff>, + { + let n = self.chol.nrows(); + assert!(j < n, "j needs to be within the bound of the matrix."); + // TODO what is the fastest way to produce the new matrix ? + let chol= self.chol.remove_column(j).remove_row(j); + + // TODO see https://en.wikipedia.org/wiki/Cholesky_decomposition#Updating_the_decomposition + unimplemented!(); + Cholesky { chol } + } } impl, S: Storage> SquareMatrix From cfa7bbdc7c4e0ee65f75425380df3708381a6867 Mon Sep 17 00:00:00 2001 From: Nestor Demeure Date: Sun, 3 Nov 2019 14:33:35 +0100 Subject: [PATCH 22/67] remove column is now working --- src/linalg/cholesky.rs | 57 +++++++++++++++++++++++++++++++++++++--- tests/linalg/cholesky.rs | 19 ++++++++++++++ 2 files changed, 72 insertions(+), 4 deletions(-) diff --git a/src/linalg/cholesky.rs b/src/linalg/cholesky.rs index f63ab826..e6a072de 100644 --- a/src/linalg/cholesky.rs +++ b/src/linalg/cholesky.rs @@ -211,7 +211,7 @@ where ); assert!(j < n, "j needs to be within the bound of the new matrix."); // TODO what is the fastest way to produce the new matrix ? - let chol= self.chol.insert_column(j, N::zero()).insert_row(j, N::zero()); + let chol= self.chol.clone().insert_column(j, N::zero()).insert_row(j, N::zero()); // TODO see https://en.wikipedia.org/wiki/Cholesky_decomposition#Updating_the_decomposition unimplemented!(); @@ -229,12 +229,16 @@ where DefaultAllocator: Reallocator> + Reallocator, DimDiff, DimDiff>, { let n = self.chol.nrows(); + assert!(n > 0, "The matrix needs at least one column."); assert!(j < n, "j needs to be within the bound of the matrix."); // TODO what is the fastest way to produce the new matrix ? 
- let chol= self.chol.remove_column(j).remove_row(j); + let mut chol= self.chol.clone().remove_column(j).remove_row(j); + + // updates the corner + let mut corner = chol.slice_range_mut(j.., j..); + let colj = self.chol.slice_range(j+1.., j); + rank_one_update_helper(&mut corner, &colj, N::real(N::one())); - // TODO see https://en.wikipedia.org/wiki/Cholesky_decomposition#Updating_the_decomposition - unimplemented!(); Cholesky { chol } } } @@ -251,3 +255,48 @@ where Cholesky::new(self.into_owned()) } } + +/// Given the Cholesky decomposition of a matrix `M`, a scalar `sigma` and a vector `v`, +/// performs a rank one update such that we end up with the decomposition of `M + sigma * v*v.adjoint()`. +fn rank_one_update_helper(chol : &mut Matrix, x: &Matrix, sigma: N::RealField) + where + N: ComplexField, D: DimSub, R2: Dim, + S: StorageMut, + S2: Storage, + DefaultAllocator: Allocator + Allocator, + ShapeConstraint: SameNumberOfRows, +{ + // heavily inspired by Eigen's `llt_rank_update_lower` implementation https://eigen.tuxfamily.org/dox/LLT_8h_source.html + let n = x.nrows(); + assert_eq!( + n, + chol.nrows(), + "The input vector must be of the same size as the factorized matrix." 
+ ); + let mut x = x.clone_owned(); + let mut beta = crate::one::(); + for j in 0..n { + // updates the diagonal + let diag = N::real(unsafe { *chol.get_unchecked((j, j)) }); + let diag2 = diag * diag; + let xj = unsafe { *x.get_unchecked(j) }; + let sigma_xj2 = sigma * N::modulus_squared(xj); + let gamma = diag2 * beta + sigma_xj2; + let new_diag = (diag2 + sigma_xj2 / beta).sqrt(); + unsafe { *chol.get_unchecked_mut((j, j)) = N::from_real(new_diag) }; + beta += sigma_xj2 / diag2; + // updates the terms of L + let mut xjplus = x.rows_range_mut(j + 1..); + let mut col_j = chol.slice_range_mut(j + 1.., j); + // temp_jplus -= (wj / N::from_real(diag)) * col_j; + xjplus.axpy(-xj / N::from_real(diag), &col_j, N::one()); + if gamma != crate::zero::() { + // col_j = N::from_real(nljj / diag) * col_j + (N::from_real(nljj * sigma / gamma) * N::conjugate(wj)) * temp_jplus; + col_j.axpy( + N::from_real(new_diag * sigma / gamma) * N::conjugate(xj), + &xjplus, + N::from_real(new_diag / diag), + ); + } + } +} \ No newline at end of file diff --git a/tests/linalg/cholesky.rs b/tests/linalg/cholesky.rs index ea8402a3..aa411564 100644 --- a/tests/linalg/cholesky.rs +++ b/tests/linalg/cholesky.rs @@ -98,6 +98,25 @@ macro_rules! 
gen_tests( relative_eq!(m, m_chol_updated, epsilon = 1.0e-7) } + + fn cholesky_remove_column(n: usize) -> bool { + let n = n.max(1).min(5); + let j = random::() % n; + let m = RandomSDP::new(Dynamic::new(n), || random::<$scalar>().0).unwrap(); + + // remove column from cholesky decomposition and rebuild m + let chol = m.clone().cholesky().unwrap().remove_column(j); + let m_chol_updated = chol.l() * chol.l().adjoint(); + + // remove column from m + let m_updated = m.remove_column(j).remove_row(j); + + println!("n={} j={}", n, j); + println!("chol:{}", m_chol_updated); + println!("m up:{}", m_updated); + + relative_eq!(m_updated, m_chol_updated, epsilon = 1.0e-7) + } } } } From b29231cf7b2909243a5bc58bd45d2a1fa38f248d Mon Sep 17 00:00:00 2001 From: Nestor Demeure Date: Sun, 3 Nov 2019 15:17:20 +0100 Subject: [PATCH 23/67] found uneeded storagemut --- src/linalg/cholesky.rs | 26 +++++++++++++++++++++----- src/linalg/solve.rs | 12 ++++++------ tests/linalg/cholesky.rs | 24 ++++++++++++++++++++---- 3 files changed, 47 insertions(+), 15 deletions(-) diff --git a/src/linalg/cholesky.rs b/src/linalg/cholesky.rs index e6a072de..45d232f2 100644 --- a/src/linalg/cholesky.rs +++ b/src/linalg/cholesky.rs @@ -195,7 +195,7 @@ where pub fn insert_column( self, j: usize, - c: &Matrix, + col: &Matrix, ) -> Cholesky> where D: DimAdd, @@ -203,7 +203,7 @@ where S2: Storage, ShapeConstraint: SameNumberOfRows>, { - let n = c.nrows(); + let n = col.nrows(); assert_eq!( n, self.chol.nrows() + 1, @@ -211,10 +211,26 @@ where ); assert!(j < n, "j needs to be within the bound of the new matrix."); // TODO what is the fastest way to produce the new matrix ? 
- let chol= self.chol.clone().insert_column(j, N::zero()).insert_row(j, N::zero()); + // TODO check for adjoint problems + let mut chol= self.chol.clone().insert_column(j, N::zero()).insert_row(j, N::zero()); + + // update the top center element S12 + let top_left_corner = chol.slice_range(..j-1, ..j-1); + let colj = col.rows_range(..j-1); // clone_owned needed to get storage mut for b in solve + let new_colj = top_left_corner.ad_solve_lower_triangular(&colj).unwrap(); + chol.slice_range_mut(..j-1, j).copy_from(&new_colj); + + // update the center element S22 + let rowj = chol.slice_range(j, ..j-1); + let center_element = N::sqrt(col[j] + rowj.dot(&rowj.adjoint())); // TODO is there a better way to multiply a vector by its adjoint ? norm_squared ? + chol[(j,j)] = center_element; + + // update the right center element S23 + //chol.slice_range_mut(j+1.., j).copy_from(&new_rowj); + + // update the bottom right corner // TODO see https://en.wikipedia.org/wiki/Cholesky_decomposition#Updating_the_decomposition - unimplemented!(); Cholesky { chol } } @@ -234,7 +250,7 @@ where // TODO what is the fastest way to produce the new matrix ? 
let mut chol= self.chol.clone().remove_column(j).remove_row(j); - // updates the corner + // updates the bottom right corner let mut corner = chol.slice_range_mut(j.., j..); let colj = self.chol.slice_range(j+1.., j); rank_one_update_helper(&mut corner, &colj, N::real(N::one())); diff --git a/src/linalg/solve.rs b/src/linalg/solve.rs index f10b1d00..a6b9196f 100644 --- a/src/linalg/solve.rs +++ b/src/linalg/solve.rs @@ -15,7 +15,7 @@ impl> SquareMatrix { b: &Matrix, ) -> Option> where - S2: StorageMut, + S2: Storage, DefaultAllocator: Allocator, ShapeConstraint: SameNumberOfRows, { @@ -35,7 +35,7 @@ impl> SquareMatrix { b: &Matrix, ) -> Option> where - S2: StorageMut, + S2: Storage, DefaultAllocator: Allocator, ShapeConstraint: SameNumberOfRows, { @@ -191,7 +191,7 @@ impl> SquareMatrix { b: &Matrix, ) -> Option> where - S2: StorageMut, + S2: Storage, DefaultAllocator: Allocator, ShapeConstraint: SameNumberOfRows, { @@ -211,7 +211,7 @@ impl> SquareMatrix { b: &Matrix, ) -> Option> where - S2: StorageMut, + S2: Storage, DefaultAllocator: Allocator, ShapeConstraint: SameNumberOfRows, { @@ -273,7 +273,7 @@ impl> SquareMatrix { b: &Matrix, ) -> Option> where - S2: StorageMut, + S2: Storage, DefaultAllocator: Allocator, ShapeConstraint: SameNumberOfRows, { @@ -293,7 +293,7 @@ impl> SquareMatrix { b: &Matrix, ) -> Option> where - S2: StorageMut, + S2: Storage, DefaultAllocator: Allocator, ShapeConstraint: SameNumberOfRows, { diff --git a/tests/linalg/cholesky.rs b/tests/linalg/cholesky.rs index aa411564..e3e5fdc7 100644 --- a/tests/linalg/cholesky.rs +++ b/tests/linalg/cholesky.rs @@ -99,6 +99,26 @@ macro_rules! 
gen_tests( relative_eq!(m, m_chol_updated, epsilon = 1.0e-7) } + fn cholesky_insert_column(n: usize) -> bool { + let n = n.max(1).min(5); + let j = random::() % n; + let m_updated = RandomSDP::new(Dynamic::new(n), || random::<$scalar>().0).unwrap(); + + // build m and col from m_updated + let col = m_updated.column(j); + let m = m_updated.clone().remove_column(j).remove_row(j); + + // remove column from cholesky decomposition and rebuild m + let chol = m.clone().cholesky().unwrap().insert_column(j, &col); + let m_chol_updated = chol.l() * chol.l().adjoint(); + + println!("n={} j={}", n, j); + println!("chol updated:{}", m_chol_updated); + println!("m updated:{}", m_updated); + + relative_eq!(m_updated, m_chol_updated, epsilon = 1.0e-7) + } + fn cholesky_remove_column(n: usize) -> bool { let n = n.max(1).min(5); let j = random::() % n; @@ -111,10 +131,6 @@ macro_rules! gen_tests( // remove column from m let m_updated = m.remove_column(j).remove_row(j); - println!("n={} j={}", n, j); - println!("chol:{}", m_chol_updated); - println!("m up:{}", m_updated); - relative_eq!(m_updated, m_chol_updated, epsilon = 1.0e-7) } } From c613360a5c46025577c4f95062e1abdf89076b09 Mon Sep 17 00:00:00 2001 From: Nestor Demeure Date: Sun, 3 Nov 2019 15:43:49 +0100 Subject: [PATCH 24/67] insert does not compile yet --- src/linalg/cholesky.rs | 26 ++++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) diff --git a/src/linalg/cholesky.rs b/src/linalg/cholesky.rs index 45d232f2..755f5610 100644 --- a/src/linalg/cholesky.rs +++ b/src/linalg/cholesky.rs @@ -6,7 +6,7 @@ use alga::general::ComplexField; use crate::allocator::Allocator; use crate::base::{DefaultAllocator, Matrix, MatrixMN, MatrixN, SquareMatrix}; use crate::constraint::{SameNumberOfRows, ShapeConstraint}; -use crate::dimension::{Dim, DimAdd, DimSum, DimDiff, DimSub, Dynamic, U1}; +use crate::dimension::{Dim, DimName, DimAdd, DimSum, DimDiff, DimSub, Dynamic, U1}; use crate::storage::{Storage, StorageMut}; 
use crate::base::allocator::Reallocator; @@ -214,21 +214,25 @@ where // TODO check for adjoint problems let mut chol= self.chol.clone().insert_column(j, N::zero()).insert_row(j, N::zero()); - // update the top center element S12 + // update the jth row let top_left_corner = chol.slice_range(..j-1, ..j-1); - let colj = col.rows_range(..j-1); // clone_owned needed to get storage mut for b in solve - let new_colj = top_left_corner.ad_solve_lower_triangular(&colj).unwrap(); - chol.slice_range_mut(..j-1, j).copy_from(&new_colj); + let colj_minus = col.rows_range(..j-1); + let rowj = top_left_corner.solve_lower_triangular(&colj_minus).unwrap().adjoint(); // TODO both the row and its adjoint seem to be usefull + chol.slice_range_mut(j, ..j-1).copy_from(&rowj); - // update the center element S22 - let rowj = chol.slice_range(j, ..j-1); + // update the center element let center_element = N::sqrt(col[j] + rowj.dot(&rowj.adjoint())); // TODO is there a better way to multiply a vector by its adjoint ? norm_squared ? chol[(j,j)] = center_element; - // update the right center element S23 - //chol.slice_range_mut(j+1.., j).copy_from(&new_rowj); + // update the jth column + let colj_plus = col.rows_range(j+1..).adjoint(); + let bottom_left_corner = chol.slice_range(j+1, ..j-1); + let colj = (colj_plus - bottom_left_corner*rowj.adjoint()) / center_element; + chol.slice_range_mut(j+1.., j).copy_from(&colj); // update the bottom right corner + let mut bottom_right_corner = chol.slice_range_mut(j.., j..); + rank_one_update_helper(&mut bottom_right_corner, &colj, -N::real(N::one())); // TODO see https://en.wikipedia.org/wiki/Cholesky_decomposition#Updating_the_decomposition Cholesky { chol } @@ -276,7 +280,9 @@ where /// performs a rank one update such that we end up with the decomposition of `M + sigma * v*v.adjoint()`. 
fn rank_one_update_helper(chol : &mut Matrix, x: &Matrix, sigma: N::RealField) where - N: ComplexField, D: DimSub, R2: Dim, + N: ComplexField, + D: DimSub, + R2: Dim, S: StorageMut, S2: Storage, DefaultAllocator: Allocator + Allocator, From 27a204538932b29636b2af99c45da40b3ba2348e Mon Sep 17 00:00:00 2001 From: Nestor Demeure Date: Sun, 3 Nov 2019 18:02:27 +0100 Subject: [PATCH 25/67] insert does compile --- src/linalg/cholesky.rs | 35 +++++++++++++++++------------------ 1 file changed, 17 insertions(+), 18 deletions(-) diff --git a/src/linalg/cholesky.rs b/src/linalg/cholesky.rs index 755f5610..0485e9e5 100644 --- a/src/linalg/cholesky.rs +++ b/src/linalg/cholesky.rs @@ -4,9 +4,9 @@ use serde::{Deserialize, Serialize}; use alga::general::ComplexField; use crate::allocator::Allocator; -use crate::base::{DefaultAllocator, Matrix, MatrixMN, MatrixN, SquareMatrix}; +use crate::base::{DefaultAllocator, Matrix, MatrixMN, MatrixN, SquareMatrix, Vector}; use crate::constraint::{SameNumberOfRows, ShapeConstraint}; -use crate::dimension::{Dim, DimName, DimAdd, DimSum, DimDiff, DimSub, Dynamic, U1}; +use crate::dimension::{Dim, DimAdd, DimSum, DimDiff, DimSub, Dynamic, U1}; use crate::storage::{Storage, StorageMut}; use crate::base::allocator::Reallocator; @@ -149,7 +149,7 @@ where /// Given the Cholesky decomposition of a matrix `M`, a scalar `sigma` and a vector `v`, /// performs a rank one update such that we end up with the decomposition of `M + sigma * v*v.adjoint()`. - pub fn rank_one_update(&mut self, x: &Matrix, sigma: N::RealField) + pub fn rank_one_update(&mut self, x: &Vector, sigma: N::RealField) where S2: Storage, DefaultAllocator: Allocator, @@ -192,17 +192,19 @@ where /// Updates the decomposition such that we get the decomposition of a matrix with the given column `c` in the `j`th position. /// Since the matrix is square, an identical row will be added in the `j`th row. 
- pub fn insert_column( + pub fn insert_column( self, j: usize, - col: &Matrix, + col: &Vector, ) -> Cholesky> where D: DimAdd, - DefaultAllocator: Reallocator> + Reallocator, DimSum, DimSum>, + R2: Dim, S2: Storage, + DefaultAllocator: Reallocator> + Reallocator, DimSum, DimSum>, ShapeConstraint: SameNumberOfRows>, { + // for an explanation of the formulas, see https://en.wikipedia.org/wiki/Cholesky_decomposition#Updating_the_decomposition let n = col.nrows(); assert_eq!( n, @@ -211,7 +213,6 @@ where ); assert!(j < n, "j needs to be within the bound of the new matrix."); // TODO what is the fastest way to produce the new matrix ? - // TODO check for adjoint problems let mut chol= self.chol.clone().insert_column(j, N::zero()).insert_row(j, N::zero()); // update the jth row @@ -225,16 +226,15 @@ where chol[(j,j)] = center_element; // update the jth column - let colj_plus = col.rows_range(j+1..).adjoint(); - let bottom_left_corner = chol.slice_range(j+1, ..j-1); - let colj = (colj_plus - bottom_left_corner*rowj.adjoint()) / center_element; + let colj_plus = col.rows_range(j+1..); + let bottom_left_corner = chol.slice_range(j+1.., ..j-1); + let colj = (colj_plus - bottom_left_corner*rowj.adjoint()) / center_element; // TODO that can probably be done with a single optimized operation chol.slice_range_mut(j+1.., j).copy_from(&colj); // update the bottom right corner - let mut bottom_right_corner = chol.slice_range_mut(j.., j..); + let mut bottom_right_corner = chol.slice_range_mut(j+1.., j+1..); rank_one_update_helper(&mut bottom_right_corner, &colj, -N::real(N::one())); - // TODO see https://en.wikipedia.org/wiki/Cholesky_decomposition#Updating_the_decomposition Cholesky { chol } } @@ -278,15 +278,14 @@ where /// Given the Cholesky decomposition of a matrix `M`, a scalar `sigma` and a vector `v`, /// performs a rank one update such that we end up with the decomposition of `M + sigma * v*v.adjoint()`. 
-fn rank_one_update_helper(chol : &mut Matrix, x: &Matrix, sigma: N::RealField) +fn rank_one_update_helper(chol : &mut Matrix, x: &Vector, sigma: N::RealField) where N: ComplexField, - D: DimSub, - R2: Dim, + D: Dim, + Rx: Dim, S: StorageMut, - S2: Storage, - DefaultAllocator: Allocator + Allocator, - ShapeConstraint: SameNumberOfRows, + Sx: Storage, + DefaultAllocator: Allocator, { // heavily inspired by Eigen's `llt_rank_update_lower` implementation https://eigen.tuxfamily.org/dox/LLT_8h_source.html let n = x.nrows(); From f54faedc3201708cc31b1807aa077867ccaccbbe Mon Sep 17 00:00:00 2001 From: Nestor Demeure Date: Sun, 3 Nov 2019 18:48:04 +0100 Subject: [PATCH 26/67] tests pass, needs cleanup --- src/linalg/cholesky.rs | 13 ++++++++----- tests/linalg/cholesky.rs | 2 +- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/src/linalg/cholesky.rs b/src/linalg/cholesky.rs index 0485e9e5..bbd233eb 100644 --- a/src/linalg/cholesky.rs +++ b/src/linalg/cholesky.rs @@ -216,18 +216,21 @@ where let mut chol= self.chol.clone().insert_column(j, N::zero()).insert_row(j, N::zero()); // update the jth row - let top_left_corner = chol.slice_range(..j-1, ..j-1); - let colj_minus = col.rows_range(..j-1); + let top_left_corner = chol.slice_range(..j, ..j); + let colj_minus = col.rows_range(..j); let rowj = top_left_corner.solve_lower_triangular(&colj_minus).unwrap().adjoint(); // TODO both the row and its adjoint seem to be usefull - chol.slice_range_mut(j, ..j-1).copy_from(&rowj); + chol.slice_range_mut(j, ..j).copy_from(&rowj); + + // TODO + //println!("dotc:{} norm2:{}", rowj.dotc(&rowj), rowj.norm_squared()); // update the center element - let center_element = N::sqrt(col[j] + rowj.dot(&rowj.adjoint())); // TODO is there a better way to multiply a vector by its adjoint ? norm_squared ? 
+ let center_element = N::sqrt(col[j] - rowj.dotc(&rowj) ); chol[(j,j)] = center_element; // update the jth column let colj_plus = col.rows_range(j+1..); - let bottom_left_corner = chol.slice_range(j+1.., ..j-1); + let bottom_left_corner = chol.slice_range(j+1.., ..j); let colj = (colj_plus - bottom_left_corner*rowj.adjoint()) / center_element; // TODO that can probably be done with a single optimized operation chol.slice_range_mut(j+1.., j).copy_from(&colj); diff --git a/tests/linalg/cholesky.rs b/tests/linalg/cholesky.rs index e3e5fdc7..e94cd80f 100644 --- a/tests/linalg/cholesky.rs +++ b/tests/linalg/cholesky.rs @@ -100,7 +100,7 @@ macro_rules! gen_tests( } fn cholesky_insert_column(n: usize) -> bool { - let n = n.max(1).min(5); + let n = n.max(1).min(50); let j = random::() % n; let m_updated = RandomSDP::new(Dynamic::new(n), || random::<$scalar>().0).unwrap(); From 3d08a80d8d8a043481c751425fb69984c32d0ce7 Mon Sep 17 00:00:00 2001 From: Nestor Demeure Date: Sun, 3 Nov 2019 20:00:15 +0100 Subject: [PATCH 27/67] needs faster matrix initialization --- src/linalg/cholesky.rs | 80 +++++++++++++----------------------------- 1 file changed, 24 insertions(+), 56 deletions(-) diff --git a/src/linalg/cholesky.rs b/src/linalg/cholesky.rs index bbd233eb..12674e4c 100644 --- a/src/linalg/cholesky.rs +++ b/src/linalg/cholesky.rs @@ -149,48 +149,17 @@ where /// Given the Cholesky decomposition of a matrix `M`, a scalar `sigma` and a vector `v`, /// performs a rank one update such that we end up with the decomposition of `M + sigma * v*v.adjoint()`. + #[inline] pub fn rank_one_update(&mut self, x: &Vector, sigma: N::RealField) where S2: Storage, DefaultAllocator: Allocator, ShapeConstraint: SameNumberOfRows, { - // heavily inspired by Eigen's `llt_rank_update_lower` implementation https://eigen.tuxfamily.org/dox/LLT_8h_source.html - let n = x.nrows(); - assert_eq!( - n, - self.chol.nrows(), - "The input vector must be of the same size as the factorized matrix." 
- ); - let mut x = x.clone_owned(); - let mut beta = crate::one::(); - for j in 0..n { - // updates the diagonal - let diag = N::real(unsafe { *self.chol.get_unchecked((j, j)) }); - let diag2 = diag * diag; - let xj = unsafe { *x.get_unchecked(j) }; - let sigma_xj2 = sigma * N::modulus_squared(xj); - let gamma = diag2 * beta + sigma_xj2; - let new_diag = (diag2 + sigma_xj2 / beta).sqrt(); - unsafe { *self.chol.get_unchecked_mut((j, j)) = N::from_real(new_diag) }; - beta += sigma_xj2 / diag2; - // updates the terms of L - let mut xjplus = x.rows_range_mut(j + 1..); - let mut col_j = self.chol.slice_range_mut(j + 1.., j); - // temp_jplus -= (wj / N::from_real(diag)) * col_j; - xjplus.axpy(-xj / N::from_real(diag), &col_j, N::one()); - if gamma != crate::zero::() { - // col_j = N::from_real(nljj / diag) * col_j + (N::from_real(nljj * sigma / gamma) * N::conjugate(wj)) * temp_jplus; - col_j.axpy( - N::from_real(new_diag * sigma / gamma) * N::conjugate(xj), - &xjplus, - N::from_real(new_diag / diag), - ); - } - } + rank_one_update(&mut self.chol, x, sigma) } - /// Updates the decomposition such that we get the decomposition of a matrix with the given column `c` in the `j`th position. + /// Updates the decomposition such that we get the decomposition of a matrix with the given column `col` in the `j`th position. /// Since the matrix is square, an identical row will be added in the `j`th row. pub fn insert_column( self, @@ -206,37 +175,32 @@ where { // for an explanation of the formulas, see https://en.wikipedia.org/wiki/Cholesky_decomposition#Updating_the_decomposition let n = col.nrows(); - assert_eq!( - n, - self.chol.nrows() + 1, - "The new column must have the size of the factored matrix plus one." - ); + assert_eq!(n, self.chol.nrows() + 1, "The new column must have the size of the factored matrix plus one."); assert!(j < n, "j needs to be within the bound of the new matrix."); + // TODO what is the fastest way to produce the new matrix ? 
let mut chol= self.chol.clone().insert_column(j, N::zero()).insert_row(j, N::zero()); // update the jth row - let top_left_corner = chol.slice_range(..j, ..j); - let colj_minus = col.rows_range(..j); - let rowj = top_left_corner.solve_lower_triangular(&colj_minus).unwrap().adjoint(); // TODO both the row and its adjoint seem to be usefull - chol.slice_range_mut(j, ..j).copy_from(&rowj); - - // TODO - //println!("dotc:{} norm2:{}", rowj.dotc(&rowj), rowj.norm_squared()); + let top_left_corner = self.chol.slice_range(..j, ..j); + let col_jminus = col.rows_range(..j); + let new_rowj_adjoint = top_left_corner.solve_lower_triangular(&col_jminus).expect("Cholesky::insert_column : Unable to solve lower triangular system!"); + new_rowj_adjoint.adjoint_to(&mut chol.slice_range_mut(j, ..j)); // update the center element - let center_element = N::sqrt(col[j] - rowj.dotc(&rowj) ); + let center_element = N::sqrt(col[j] - N::from_real(new_rowj_adjoint.norm_squared())); chol[(j,j)] = center_element; // update the jth column - let colj_plus = col.rows_range(j+1..); - let bottom_left_corner = chol.slice_range(j+1.., ..j); - let colj = (colj_plus - bottom_left_corner*rowj.adjoint()) / center_element; // TODO that can probably be done with a single optimized operation - chol.slice_range_mut(j+1.., j).copy_from(&colj); + let bottom_left_corner = self.chol.slice_range(j.., ..j); + // new_colj = (col_jplus - bottom_left_corner * new_rowj.adjoint()) / center_element; + let mut new_colj = col.rows_range(j+1..).clone_owned(); + new_colj.gemm(-N::one() / center_element, &bottom_left_corner, &new_rowj_adjoint, N::one() / center_element ); + chol.slice_range_mut(j+1.., j).copy_from(&new_colj); // update the bottom right corner let mut bottom_right_corner = chol.slice_range_mut(j+1.., j+1..); - rank_one_update_helper(&mut bottom_right_corner, &colj, -N::real(N::one())); + rank_one_update(&mut bottom_right_corner, &new_colj, -N::real(N::one())); Cholesky { chol } } @@ -254,13 +218,14 @@ where 
let n = self.chol.nrows(); assert!(n > 0, "The matrix needs at least one column."); assert!(j < n, "j needs to be within the bound of the matrix."); + // TODO what is the fastest way to produce the new matrix ? let mut chol= self.chol.clone().remove_column(j).remove_row(j); // updates the bottom right corner - let mut corner = chol.slice_range_mut(j.., j..); - let colj = self.chol.slice_range(j+1.., j); - rank_one_update_helper(&mut corner, &colj, N::real(N::one())); + let mut bottom_right_corner = chol.slice_range_mut(j.., j..); + let old_colj = self.chol.slice_range(j+1.., j); + rank_one_update(&mut bottom_right_corner, &old_colj, N::real(N::one())); Cholesky { chol } } @@ -281,7 +246,10 @@ where /// Given the Cholesky decomposition of a matrix `M`, a scalar `sigma` and a vector `v`, /// performs a rank one update such that we end up with the decomposition of `M + sigma * v*v.adjoint()`. -fn rank_one_update_helper(chol : &mut Matrix, x: &Vector, sigma: N::RealField) +/// +/// This helper method is calling for by `rank_one_update` but also `insert_column` and `remove_column` +/// where it is used on a square slice of the decomposition +fn rank_one_update(chol : &mut Matrix, x: &Vector, sigma: N::RealField) where N: ComplexField, D: Dim, From 59c6a9861563fd70533942365ce0360903898849 Mon Sep 17 00:00:00 2001 From: Nestor Demeure Date: Sun, 3 Nov 2019 21:24:44 +0100 Subject: [PATCH 28/67] finished cleaning --- src/linalg/cholesky.rs | 127 +++++++++++++++++++++------------------ tests/linalg/cholesky.rs | 8 +-- 2 files changed, 69 insertions(+), 66 deletions(-) diff --git a/src/linalg/cholesky.rs b/src/linalg/cholesky.rs index 12674e4c..262d1225 100644 --- a/src/linalg/cholesky.rs +++ b/src/linalg/cholesky.rs @@ -8,7 +8,6 @@ use crate::base::{DefaultAllocator, Matrix, MatrixMN, MatrixN, SquareMatrix, Vec use crate::constraint::{SameNumberOfRows, ShapeConstraint}; use crate::dimension::{Dim, DimAdd, DimSum, DimDiff, DimSub, Dynamic, U1}; use crate::storage::{Storage, 
StorageMut}; -use crate::base::allocator::Reallocator; /// The Cholesky decomposition of a symmetric-definite-positive matrix. #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] @@ -156,7 +155,7 @@ where DefaultAllocator: Allocator, ShapeConstraint: SameNumberOfRows, { - rank_one_update(&mut self.chol, x, sigma) + Self::xx_rank_one_update(&mut self.chol, x, sigma) } /// Updates the decomposition such that we get the decomposition of a matrix with the given column `col` in the `j`th position. @@ -170,7 +169,7 @@ where D: DimAdd, R2: Dim, S2: Storage, - DefaultAllocator: Reallocator> + Reallocator, DimSum, DimSum>, + DefaultAllocator: Allocator, DimSum>, ShapeConstraint: SameNumberOfRows>, { // for an explanation of the formulas, see https://en.wikipedia.org/wiki/Cholesky_decomposition#Updating_the_decomposition @@ -178,8 +177,12 @@ where assert_eq!(n, self.chol.nrows() + 1, "The new column must have the size of the factored matrix plus one."); assert!(j < n, "j needs to be within the bound of the new matrix."); - // TODO what is the fastest way to produce the new matrix ? 
- let mut chol= self.chol.clone().insert_column(j, N::zero()).insert_row(j, N::zero()); + // loads the data into a new matrix with an additional jth row/column + let mut chol = unsafe { Matrix::new_uninitialized_generic(self.chol.data.shape().0.add(U1), self.chol.data.shape().1.add(U1)) }; + chol.slice_range_mut(..j, ..j).copy_from(&self.chol.slice_range(..j, ..j)); + chol.slice_range_mut(..j, j+1..).copy_from(&self.chol.slice_range(..j, j..)); + chol.slice_range_mut(j+1.., ..j).copy_from(&self.chol.slice_range(j.., ..j)); + chol.slice_range_mut(j+1.., j+1..).copy_from(&self.chol.slice_range(j.., j..)); // update the jth row let top_left_corner = self.chol.slice_range(..j, ..j); @@ -200,7 +203,7 @@ where // update the bottom right corner let mut bottom_right_corner = chol.slice_range_mut(j+1.., j+1..); - rank_one_update(&mut bottom_right_corner, &new_colj, -N::real(N::one())); + Self::xx_rank_one_update(&mut bottom_right_corner, &new_colj, -N::real(N::one())); Cholesky { chol } } @@ -208,27 +211,80 @@ where /// Updates the decomposition such that we get the decomposition of the factored matrix with its `j`th column removed. /// Since the matrix is square, the `j`th row will also be removed. pub fn remove_column( - self, + &self, j: usize, ) -> Cholesky> where D: DimSub, - DefaultAllocator: Reallocator> + Reallocator, DimDiff, DimDiff>, + DefaultAllocator: Allocator, DimDiff> { let n = self.chol.nrows(); assert!(n > 0, "The matrix needs at least one column."); assert!(j < n, "j needs to be within the bound of the matrix."); - // TODO what is the fastest way to produce the new matrix ? 
- let mut chol= self.chol.clone().remove_column(j).remove_row(j); + // loads the data into a new matrix except for the jth row/column + let mut chol = unsafe { Matrix::new_uninitialized_generic(self.chol.data.shape().0.sub(U1), self.chol.data.shape().1.sub(U1)) }; + chol.slice_range_mut(..j, ..j).copy_from(&self.chol.slice_range(..j, ..j)); + chol.slice_range_mut(..j, j..).copy_from(&self.chol.slice_range(..j, j+1..)); + chol.slice_range_mut(j.., ..j).copy_from(&self.chol.slice_range(j+1.., ..j)); + chol.slice_range_mut(j.., j..).copy_from(&self.chol.slice_range(j+1.., j+1..)); // updates the bottom right corner let mut bottom_right_corner = chol.slice_range_mut(j.., j..); let old_colj = self.chol.slice_range(j+1.., j); - rank_one_update(&mut bottom_right_corner, &old_colj, N::real(N::one())); + Self::xx_rank_one_update(&mut bottom_right_corner, &old_colj, N::real(N::one())); Cholesky { chol } } + + /// Given the Cholesky decomposition of a matrix `M`, a scalar `sigma` and a vector `v`, + /// performs a rank one update such that we end up with the decomposition of `M + sigma * v*v.adjoint()`. + /// + /// This helper method is calling for by `rank_one_update` but also `insert_column` and `remove_column` + /// where it is used on a square slice of the decomposition + fn xx_rank_one_update(chol : &mut Matrix, x: &Vector, sigma: N::RealField) + where + //N: ComplexField, + Dm: Dim, + Rx: Dim, + Sm: StorageMut, + Sx: Storage, + DefaultAllocator: Allocator, + { + // heavily inspired by Eigen's `llt_rank_update_lower` implementation https://eigen.tuxfamily.org/dox/LLT_8h_source.html + let n = x.nrows(); + assert_eq!( + n, + chol.nrows(), + "The input vector must be of the same size as the factorized matrix." 
+ ); + let mut x = x.clone_owned(); + let mut beta = crate::one::(); + for j in 0..n { + // updates the diagonal + let diag = N::real(unsafe { *chol.get_unchecked((j, j)) }); + let diag2 = diag * diag; + let xj = unsafe { *x.get_unchecked(j) }; + let sigma_xj2 = sigma * N::modulus_squared(xj); + let gamma = diag2 * beta + sigma_xj2; + let new_diag = (diag2 + sigma_xj2 / beta).sqrt(); + unsafe { *chol.get_unchecked_mut((j, j)) = N::from_real(new_diag) }; + beta += sigma_xj2 / diag2; + // updates the terms of L + let mut xjplus = x.rows_range_mut(j + 1..); + let mut col_j = chol.slice_range_mut(j + 1.., j); + // temp_jplus -= (wj / N::from_real(diag)) * col_j; + xjplus.axpy(-xj / N::from_real(diag), &col_j, N::one()); + if gamma != crate::zero::() { + // col_j = N::from_real(nljj / diag) * col_j + (N::from_real(nljj * sigma / gamma) * N::conjugate(wj)) * temp_jplus; + col_j.axpy( + N::from_real(new_diag * sigma / gamma) * N::conjugate(xj), + &xjplus, + N::from_real(new_diag / diag), + ); + } + } + } } impl, S: Storage> SquareMatrix @@ -243,52 +299,3 @@ where Cholesky::new(self.into_owned()) } } - -/// Given the Cholesky decomposition of a matrix `M`, a scalar `sigma` and a vector `v`, -/// performs a rank one update such that we end up with the decomposition of `M + sigma * v*v.adjoint()`. -/// -/// This helper method is calling for by `rank_one_update` but also `insert_column` and `remove_column` -/// where it is used on a square slice of the decomposition -fn rank_one_update(chol : &mut Matrix, x: &Vector, sigma: N::RealField) - where - N: ComplexField, - D: Dim, - Rx: Dim, - S: StorageMut, - Sx: Storage, - DefaultAllocator: Allocator, -{ - // heavily inspired by Eigen's `llt_rank_update_lower` implementation https://eigen.tuxfamily.org/dox/LLT_8h_source.html - let n = x.nrows(); - assert_eq!( - n, - chol.nrows(), - "The input vector must be of the same size as the factorized matrix." 
- ); - let mut x = x.clone_owned(); - let mut beta = crate::one::(); - for j in 0..n { - // updates the diagonal - let diag = N::real(unsafe { *chol.get_unchecked((j, j)) }); - let diag2 = diag * diag; - let xj = unsafe { *x.get_unchecked(j) }; - let sigma_xj2 = sigma * N::modulus_squared(xj); - let gamma = diag2 * beta + sigma_xj2; - let new_diag = (diag2 + sigma_xj2 / beta).sqrt(); - unsafe { *chol.get_unchecked_mut((j, j)) = N::from_real(new_diag) }; - beta += sigma_xj2 / diag2; - // updates the terms of L - let mut xjplus = x.rows_range_mut(j + 1..); - let mut col_j = chol.slice_range_mut(j + 1.., j); - // temp_jplus -= (wj / N::from_real(diag)) * col_j; - xjplus.axpy(-xj / N::from_real(diag), &col_j, N::one()); - if gamma != crate::zero::() { - // col_j = N::from_real(nljj / diag) * col_j + (N::from_real(nljj * sigma / gamma) * N::conjugate(wj)) * temp_jplus; - col_j.axpy( - N::from_real(new_diag * sigma / gamma) * N::conjugate(xj), - &xjplus, - N::from_real(new_diag / diag), - ); - } - } -} \ No newline at end of file diff --git a/tests/linalg/cholesky.rs b/tests/linalg/cholesky.rs index e94cd80f..5f10339d 100644 --- a/tests/linalg/cholesky.rs +++ b/tests/linalg/cholesky.rs @@ -100,7 +100,7 @@ macro_rules! gen_tests( } fn cholesky_insert_column(n: usize) -> bool { - let n = n.max(1).min(50); + let n = n.max(1).min(10); let j = random::() % n; let m_updated = RandomSDP::new(Dynamic::new(n), || random::<$scalar>().0).unwrap(); @@ -112,15 +112,11 @@ macro_rules! 
gen_tests( let chol = m.clone().cholesky().unwrap().insert_column(j, &col); let m_chol_updated = chol.l() * chol.l().adjoint(); - println!("n={} j={}", n, j); - println!("chol updated:{}", m_chol_updated); - println!("m updated:{}", m_updated); - relative_eq!(m_updated, m_chol_updated, epsilon = 1.0e-7) } fn cholesky_remove_column(n: usize) -> bool { - let n = n.max(1).min(5); + let n = n.max(1).min(10); let j = random::() % n; let m = RandomSDP::new(Dynamic::new(n), || random::<$scalar>().0).unwrap(); From 667c49d0e18e9bd6d561dc71b73004daee3faae1 Mon Sep 17 00:00:00 2001 From: daingun Date: Fri, 1 Nov 2019 22:12:59 +0100 Subject: [PATCH 29/67] Correct Schur decomposition for 2x2 matrices Due to rounding and possible loss of precision the lower left element of the 2x2 matrix may be different from zero. --- src/linalg/schur.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/linalg/schur.rs b/src/linalg/schur.rs index b31be9f6..2a2bb250 100644 --- a/src/linalg/schur.rs +++ b/src/linalg/schur.rs @@ -413,6 +413,7 @@ where let inv_rot = rot.inverse(); inv_rot.rotate(&mut m); rot.rotate_rows(&mut m); + m[(1, 0)] = N::zero(); if compute_q { // XXX: we have to build the matrix manually because From 50417494ecae77b2b47e1abd389e7d68d995c511 Mon Sep 17 00:00:00 2001 From: daingun Date: Fri, 1 Nov 2019 23:27:08 +0100 Subject: [PATCH 30/67] Use same algorithm to solve 2x2 eigenvalue problem The eigenvalue problem is solved in two different method that use different methods to calculate the discriminant of the solution to the quadratic equation. Use the method whose computation is considered more stable. 
--- src/linalg/schur.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/linalg/schur.rs b/src/linalg/schur.rs index 2a2bb250..5cd90a94 100644 --- a/src/linalg/schur.rs +++ b/src/linalg/schur.rs @@ -309,16 +309,17 @@ where let hmn = t[(m, n)]; let hnn = t[(n, n)]; - let tra = hnn + hmm; - let det = hnn * hmm - hnm * hmn; - let discr = tra * tra * crate::convert(0.25) - det; + // NOTE: use the same algorithm as in compute_2x2_eigvals. + let val = (hmm - hnn) * crate::convert(0.5); + let discr = hnm * hmn + val * val; // All 2x2 blocks have negative discriminant because we already decoupled those - // with positive eigenvalues.. + // with positive eigenvalues. let sqrt_discr = NumComplex::new(N::zero(), (-discr).sqrt()); - out[m] = NumComplex::new(tra * crate::convert(0.5), N::zero()) + sqrt_discr; - out[m + 1] = NumComplex::new(tra * crate::convert(0.5), N::zero()) - sqrt_discr; + let half_tra = (hnn + hmm) * crate::convert(0.5); + out[m] = NumComplex::new(half_tra, N::zero()) + sqrt_discr; + out[m + 1] = NumComplex::new(half_tra, N::zero()) - sqrt_discr; m += 2; } @@ -413,7 +414,6 @@ where let inv_rot = rot.inverse(); inv_rot.rotate(&mut m); rot.rotate_rows(&mut m); - m[(1, 0)] = N::zero(); if compute_q { // XXX: we have to build the matrix manually because From b96159aab3c3f6240d12d16280f00075e0736dc7 Mon Sep 17 00:00:00 2001 From: sebcrozet Date: Sun, 17 Nov 2019 13:10:50 +0100 Subject: [PATCH 31/67] Fix Cholesky for no-std platforms. 
--- src/linalg/cholesky.rs | 43 ++++++++++++++++++++++------------------ tests/linalg/cholesky.rs | 2 +- 2 files changed, 25 insertions(+), 20 deletions(-) diff --git a/src/linalg/cholesky.rs b/src/linalg/cholesky.rs index 262d1225..111f5680 100644 --- a/src/linalg/cholesky.rs +++ b/src/linalg/cholesky.rs @@ -1,6 +1,7 @@ #[cfg(feature = "serde-serialize")] use serde::{Deserialize, Serialize}; +use num::One; use alga::general::ComplexField; use crate::allocator::Allocator; @@ -155,23 +156,24 @@ where DefaultAllocator: Allocator, ShapeConstraint: SameNumberOfRows, { - Self::xx_rank_one_update(&mut self.chol, x, sigma) + Self::xx_rank_one_update(&mut self.chol, &mut x.clone_owned(), sigma) } /// Updates the decomposition such that we get the decomposition of a matrix with the given column `col` in the `j`th position. /// Since the matrix is square, an identical row will be added in the `j`th row. pub fn insert_column( - self, + &self, j: usize, - col: &Vector, + col: Vector, ) -> Cholesky> where D: DimAdd, R2: Dim, S2: Storage, - DefaultAllocator: Allocator, DimSum>, + DefaultAllocator: Allocator, DimSum> + Allocator, ShapeConstraint: SameNumberOfRows>, { + let mut col = col.into_owned(); // for an explanation of the formulas, see https://en.wikipedia.org/wiki/Cholesky_decomposition#Updating_the_decomposition let n = col.nrows(); assert_eq!(n, self.chol.nrows() + 1, "The new column must have the size of the factored matrix plus one."); @@ -186,24 +188,26 @@ where // update the jth row let top_left_corner = self.chol.slice_range(..j, ..j); - let col_jminus = col.rows_range(..j); - let new_rowj_adjoint = top_left_corner.solve_lower_triangular(&col_jminus).expect("Cholesky::insert_column : Unable to solve lower triangular system!"); + + let col_j = col[j]; + let (mut new_rowj_adjoint, mut new_colj) = col.rows_range_pair_mut(..j, j + 1..); + assert!(top_left_corner.solve_lower_triangular_mut(&mut new_rowj_adjoint), "Cholesky::insert_column : Unable to solve lower 
triangular system!"); + new_rowj_adjoint.adjoint_to(&mut chol.slice_range_mut(j, ..j)); // update the center element - let center_element = N::sqrt(col[j] - N::from_real(new_rowj_adjoint.norm_squared())); - chol[(j,j)] = center_element; + let center_element = N::sqrt(col_j - N::from_real(new_rowj_adjoint.norm_squared())); + chol[(j, j)] = center_element; // update the jth column let bottom_left_corner = self.chol.slice_range(j.., ..j); // new_colj = (col_jplus - bottom_left_corner * new_rowj.adjoint()) / center_element; - let mut new_colj = col.rows_range(j+1..).clone_owned(); - new_colj.gemm(-N::one() / center_element, &bottom_left_corner, &new_rowj_adjoint, N::one() / center_element ); + new_colj.gemm(-N::one() / center_element, &bottom_left_corner, &new_rowj_adjoint, N::one() / center_element); chol.slice_range_mut(j+1.., j).copy_from(&new_colj); // update the bottom right corner let mut bottom_right_corner = chol.slice_range_mut(j+1.., j+1..); - Self::xx_rank_one_update(&mut bottom_right_corner, &new_colj, -N::real(N::one())); + Self::xx_rank_one_update(&mut bottom_right_corner, &mut new_colj, -N::RealField::one()); Cholesky { chol } } @@ -216,7 +220,7 @@ where ) -> Cholesky> where D: DimSub, - DefaultAllocator: Allocator, DimDiff> + DefaultAllocator: Allocator, DimDiff> + Allocator { let n = self.chol.nrows(); assert!(n > 0, "The matrix needs at least one column."); @@ -231,25 +235,25 @@ where // updates the bottom right corner let mut bottom_right_corner = chol.slice_range_mut(j.., j..); - let old_colj = self.chol.slice_range(j+1.., j); - Self::xx_rank_one_update(&mut bottom_right_corner, &old_colj, N::real(N::one())); + let mut workspace = self.chol.column(j).clone_owned(); + let mut old_colj = workspace.rows_range_mut(j+1..); + Self::xx_rank_one_update(&mut bottom_right_corner, &mut old_colj, N::RealField::one()); Cholesky { chol } } /// Given the Cholesky decomposition of a matrix `M`, a scalar `sigma` and a vector `v`, - /// performs a rank one update 
such that we end up with the decomposition of `M + sigma * v*v.adjoint()`. + /// performs a rank one update such that we end up with the decomposition of `M + sigma * x*x.adjoint()`. /// /// This helper method is calling for by `rank_one_update` but also `insert_column` and `remove_column` /// where it is used on a square slice of the decomposition - fn xx_rank_one_update(chol : &mut Matrix, x: &Vector, sigma: N::RealField) + fn xx_rank_one_update(chol : &mut Matrix, x: &mut Vector, sigma: N::RealField) where //N: ComplexField, Dm: Dim, Rx: Dim, Sm: StorageMut, - Sx: Storage, - DefaultAllocator: Allocator, + Sx: StorageMut, { // heavily inspired by Eigen's `llt_rank_update_lower` implementation https://eigen.tuxfamily.org/dox/LLT_8h_source.html let n = x.nrows(); @@ -258,8 +262,9 @@ where chol.nrows(), "The input vector must be of the same size as the factorized matrix." ); - let mut x = x.clone_owned(); + let mut beta = crate::one::(); + for j in 0..n { // updates the diagonal let diag = N::real(unsafe { *chol.get_unchecked((j, j)) }); diff --git a/tests/linalg/cholesky.rs b/tests/linalg/cholesky.rs index 5f10339d..a89802b2 100644 --- a/tests/linalg/cholesky.rs +++ b/tests/linalg/cholesky.rs @@ -109,7 +109,7 @@ macro_rules! gen_tests( let m = m_updated.clone().remove_column(j).remove_row(j); // remove column from cholesky decomposition and rebuild m - let chol = m.clone().cholesky().unwrap().insert_column(j, &col); + let chol = m.clone().cholesky().unwrap().insert_column(j, col); let m_chol_updated = chol.l() * chol.l().adjoint(); relative_eq!(m_updated, m_chol_updated, epsilon = 1.0e-7) From cd8fc9285f658dc736b680b04192d08be0b81397 Mon Sep 17 00:00:00 2001 From: sebcrozet Date: Sun, 17 Nov 2019 13:24:00 +0100 Subject: [PATCH 32/67] Add some missing spaces. 
--- src/linalg/cholesky.rs | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/src/linalg/cholesky.rs b/src/linalg/cholesky.rs index 111f5680..1a310cbf 100644 --- a/src/linalg/cholesky.rs +++ b/src/linalg/cholesky.rs @@ -148,7 +148,7 @@ where } /// Given the Cholesky decomposition of a matrix `M`, a scalar `sigma` and a vector `v`, - /// performs a rank one update such that we end up with the decomposition of `M + sigma * v*v.adjoint()`. + /// performs a rank one update such that we end up with the decomposition of `M + sigma * (v * v.adjoint())`. #[inline] pub fn rank_one_update(&mut self, x: &Vector, sigma: N::RealField) where @@ -182,9 +182,9 @@ where // loads the data into a new matrix with an additional jth row/column let mut chol = unsafe { Matrix::new_uninitialized_generic(self.chol.data.shape().0.add(U1), self.chol.data.shape().1.add(U1)) }; chol.slice_range_mut(..j, ..j).copy_from(&self.chol.slice_range(..j, ..j)); - chol.slice_range_mut(..j, j+1..).copy_from(&self.chol.slice_range(..j, j..)); - chol.slice_range_mut(j+1.., ..j).copy_from(&self.chol.slice_range(j.., ..j)); - chol.slice_range_mut(j+1.., j+1..).copy_from(&self.chol.slice_range(j.., j..)); + chol.slice_range_mut(..j, j + 1..).copy_from(&self.chol.slice_range(..j, j..)); + chol.slice_range_mut(j + 1.., ..j).copy_from(&self.chol.slice_range(j.., ..j)); + chol.slice_range_mut(j + 1.., j + 1..).copy_from(&self.chol.slice_range(j.., j..)); // update the jth row let top_left_corner = self.chol.slice_range(..j, ..j); @@ -203,10 +203,10 @@ where let bottom_left_corner = self.chol.slice_range(j.., ..j); // new_colj = (col_jplus - bottom_left_corner * new_rowj.adjoint()) / center_element; new_colj.gemm(-N::one() / center_element, &bottom_left_corner, &new_rowj_adjoint, N::one() / center_element); - chol.slice_range_mut(j+1.., j).copy_from(&new_colj); + chol.slice_range_mut(j + 1.., j).copy_from(&new_colj); // update the bottom right corner - let mut 
bottom_right_corner = chol.slice_range_mut(j+1.., j+1..); + let mut bottom_right_corner = chol.slice_range_mut(j + 1.., j + 1..); Self::xx_rank_one_update(&mut bottom_right_corner, &mut new_colj, -N::RealField::one()); Cholesky { chol } @@ -229,21 +229,21 @@ where // loads the data into a new matrix except for the jth row/column let mut chol = unsafe { Matrix::new_uninitialized_generic(self.chol.data.shape().0.sub(U1), self.chol.data.shape().1.sub(U1)) }; chol.slice_range_mut(..j, ..j).copy_from(&self.chol.slice_range(..j, ..j)); - chol.slice_range_mut(..j, j..).copy_from(&self.chol.slice_range(..j, j+1..)); - chol.slice_range_mut(j.., ..j).copy_from(&self.chol.slice_range(j+1.., ..j)); - chol.slice_range_mut(j.., j..).copy_from(&self.chol.slice_range(j+1.., j+1..)); + chol.slice_range_mut(..j, j..).copy_from(&self.chol.slice_range(..j, j + 1..)); + chol.slice_range_mut(j.., ..j).copy_from(&self.chol.slice_range(j + 1.., ..j)); + chol.slice_range_mut(j.., j..).copy_from(&self.chol.slice_range(j + 1.., j + 1..)); // updates the bottom right corner let mut bottom_right_corner = chol.slice_range_mut(j.., j..); let mut workspace = self.chol.column(j).clone_owned(); - let mut old_colj = workspace.rows_range_mut(j+1..); + let mut old_colj = workspace.rows_range_mut(j + 1..); Self::xx_rank_one_update(&mut bottom_right_corner, &mut old_colj, N::RealField::one()); Cholesky { chol } } /// Given the Cholesky decomposition of a matrix `M`, a scalar `sigma` and a vector `v`, - /// performs a rank one update such that we end up with the decomposition of `M + sigma * x*x.adjoint()`. + /// performs a rank one update such that we end up with the decomposition of `M + sigma * (x * x.adjoint())`. 
/// /// This helper method is calling for by `rank_one_update` but also `insert_column` and `remove_column` /// where it is used on a square slice of the decomposition From 8404839233680f9f68a471620c65d444a1dcf948 Mon Sep 17 00:00:00 2001 From: Avi Weinstock Date: Fri, 15 Nov 2019 14:29:18 -0500 Subject: [PATCH 33/67] Import IsaacRng from rand_isaac instead of rand. The rand crate removed IsaacRng in the 0.5 -> 0.6 transition, so the benchmarks that use it haven't compiled since 9c37c512039059281b210521cfd53e931f5fa0b5. --- Cargo.toml | 1 + benches/core/matrix.rs | 5 +++-- benches/core/vector.rs | 3 ++- benches/geometry/quaternion.rs | 5 +++-- benches/lib.rs | 6 ++++-- 5 files changed, 13 insertions(+), 7 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 582e0824..1569b0a6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -56,6 +56,7 @@ pest_derive = { version = "2.0", optional = true } [dev-dependencies] serde_json = "1.0" rand_xorshift = "0.2" +rand_isaac = "0.2" ### Uncomment this line before running benchmarks. 
### We can't just let this uncommented because that would break ### compilation for #[no-std] because of the terrible Cargo bug diff --git a/benches/core/matrix.rs b/benches/core/matrix.rs index 12fb836d..ac983ff4 100644 --- a/benches/core/matrix.rs +++ b/benches/core/matrix.rs @@ -1,5 +1,6 @@ use na::{DMatrix, DVector, Matrix2, Matrix3, Matrix4, MatrixN, Vector2, Vector3, Vector4, U10}; -use rand::{IsaacRng, Rng}; +use rand::Rng; +use rand_isaac::IsaacRng; use std::ops::{Add, Div, Mul, Sub}; #[path = "../common/macros.rs"] @@ -237,4 +238,4 @@ criterion_group!(matrix, mat_mul_mat, mat100_from_fn, mat500_from_fn, -); \ No newline at end of file +); diff --git a/benches/core/vector.rs b/benches/core/vector.rs index 7d3237e8..fd44aedc 100644 --- a/benches/core/vector.rs +++ b/benches/core/vector.rs @@ -1,5 +1,6 @@ use na::{DVector, Vector2, Vector3, Vector4, VectorN}; -use rand::{IsaacRng, Rng}; +use rand::Rng; +use rand_isaac::IsaacRng; use std::ops::{Add, Div, Mul, Sub}; use typenum::U10000; diff --git a/benches/geometry/quaternion.rs b/benches/geometry/quaternion.rs index dd079aac..326872f3 100644 --- a/benches/geometry/quaternion.rs +++ b/benches/geometry/quaternion.rs @@ -1,5 +1,6 @@ use na::{Quaternion, UnitQuaternion, Vector3}; -use rand::{IsaacRng, Rng}; +use rand::Rng; +use rand_isaac::IsaacRng; use std::ops::{Add, Div, Mul, Sub}; #[path = "../common/macros.rs"] @@ -34,4 +35,4 @@ criterion_group!(quaternion, quaternion_div_s, quaternion_inv, unit_quaternion_inv -); \ No newline at end of file +); diff --git a/benches/lib.rs b/benches/lib.rs index d5333542..e4215a12 100644 --- a/benches/lib.rs +++ b/benches/lib.rs @@ -3,6 +3,7 @@ extern crate nalgebra as na; extern crate rand; +extern crate rand_isaac; extern crate test; extern crate typenum; @@ -10,7 +11,8 @@ extern crate typenum; extern crate criterion; use na::DMatrix; -use rand::{IsaacRng, Rng}; +use rand::Rng; +use rand_isaac::IsaacRng; pub mod core; pub mod geometry; @@ -36,4 +38,4 @@ criterion_main!( 
linalg::solve, linalg::svd, linalg::symmetric_eigen, -); \ No newline at end of file +); From 50ad84e4b1a9861486b06ace5f50f5286385b9bc Mon Sep 17 00:00:00 2001 From: Nestor Demeure Date: Sun, 17 Nov 2019 13:40:19 +0100 Subject: [PATCH 34/67] Update cholesky.rs corrected typo in doc for `xx_rank_one_update` --- src/linalg/cholesky.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/linalg/cholesky.rs b/src/linalg/cholesky.rs index 1a310cbf..67baefb1 100644 --- a/src/linalg/cholesky.rs +++ b/src/linalg/cholesky.rs @@ -242,10 +242,10 @@ where Cholesky { chol } } - /// Given the Cholesky decomposition of a matrix `M`, a scalar `sigma` and a vector `v`, + /// Given the Cholesky decomposition of a matrix `M`, a scalar `sigma` and a vector `x`, /// performs a rank one update such that we end up with the decomposition of `M + sigma * (x * x.adjoint())`. /// - /// This helper method is calling for by `rank_one_update` but also `insert_column` and `remove_column` + /// This helper method is called by `rank_one_update` but also `insert_column` and `remove_column` /// where it is used on a square slice of the decomposition fn xx_rank_one_update(chol : &mut Matrix, x: &mut Vector, sigma: N::RealField) where From e1c8e1bccfaca6c25fdfd534f72ea901f6cfe0e2 Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Wed, 4 Sep 2019 16:02:31 +0200 Subject: [PATCH 35/67] Fix Vector::axpy for noncommutative cases One example would be performing simple matrix multiplication over a division algebra such as quaternions.
--- src/base/blas.rs | 16 ++--- tests/core/blas.rs | 172 ++++++++++++++++++++++++++------------------- 2 files changed, 106 insertions(+), 82 deletions(-) diff --git a/src/base/blas.rs b/src/base/blas.rs index cc8f2345..dec28d07 100644 --- a/src/base/blas.rs +++ b/src/base/blas.rs @@ -473,7 +473,7 @@ where N: Scalar + Zero + ClosedAdd + ClosedMul { for i in 0..len { unsafe { let y = y.get_unchecked_mut(i * stride1); - *y = a * *x.get_unchecked(i * stride2) + beta * *y; + *y = *x.get_unchecked(i * stride2) * a + *y * beta; } } } @@ -482,7 +482,7 @@ fn array_ax(y: &mut [N], a: N, x: &[N], stride1: usize, stride2: usize, len: where N: Scalar + Zero + ClosedAdd + ClosedMul { for i in 0..len { unsafe { - *y.get_unchecked_mut(i * stride1) = a * *x.get_unchecked(i * stride2); + *y.get_unchecked_mut(i * stride1) = *x.get_unchecked(i * stride2) * a; } } } @@ -579,13 +579,13 @@ where // FIXME: avoid bound checks. let col2 = a.column(0); let val = unsafe { *x.vget_unchecked(0) }; - self.axpy(alpha * val, &col2, beta); + self.axpy(val * alpha, &col2, beta); for j in 1..ncols2 { let col2 = a.column(j); let val = unsafe { *x.vget_unchecked(j) }; - self.axpy(alpha * val, &col2, N::one()); + self.axpy(val * alpha, &col2, N::one()); } } @@ -624,7 +624,7 @@ where // FIXME: avoid bound checks. let col2 = a.column(0); let val = unsafe { *x.vget_unchecked(0) }; - self.axpy(alpha * val, &col2, beta); + self.axpy(val * alpha, &col2, beta); self[0] += alpha * dot(&a.slice_range(1.., 0), &x.rows_range(1..)); for j in 1..dim2 { @@ -637,7 +637,7 @@ where *self.vget_unchecked_mut(j) += alpha * dot; } self.rows_range_mut(j + 1..) - .axpy(alpha * val, &col2.rows_range(j + 1..), N::one()); + .axpy(val * alpha, &col2.rows_range(j + 1..), N::one()); } } @@ -890,7 +890,7 @@ where N: Scalar + Zero + ClosedAdd + ClosedMul for j in 0..ncols1 { // FIXME: avoid bound checks. 
let val = unsafe { conjugate(*y.vget_unchecked(j)) }; - self.column_mut(j).axpy(alpha * val, x, beta); + self.column_mut(j).axpy(val * alpha, x, beta); } } @@ -1256,7 +1256,7 @@ where N: Scalar + Zero + ClosedAdd + ClosedMul let subdim = Dynamic::new(dim1 - j); // FIXME: avoid bound checks. self.generic_slice_mut((j, j), (subdim, U1)).axpy( - alpha * val, + val * alpha, &x.rows_range(j..), beta, ); diff --git a/tests/core/blas.rs b/tests/core/blas.rs index 38113c17..9b7be4af 100644 --- a/tests/core/blas.rs +++ b/tests/core/blas.rs @@ -1,105 +1,129 @@ -#![cfg(feature = "arbitrary")] +use na::{geometry::Quaternion, Matrix2, Vector3}; +use num_traits::{One, Zero}; -use na::{DMatrix, DVector}; -use std::cmp; +#[test] +fn gemm_noncommutative() { + type Qf64 = Quaternion; + let i = Qf64::from_imag(Vector3::new(1.0, 0.0, 0.0)); + let j = Qf64::from_imag(Vector3::new(0.0, 1.0, 0.0)); + let k = Qf64::from_imag(Vector3::new(0.0, 0.0, 1.0)); -quickcheck! { - /* - * - * Symmetric operators. - * - */ - fn gemv_symm(n: usize, alpha: f64, beta: f64) -> bool { - let n = cmp::max(1, cmp::min(n, 50)); - let a = DMatrix::::new_random(n, n); - let a = &a * a.transpose(); + let m1 = Matrix2::new(k, Qf64::zero(), j, i); + // this is the inverse of m1 + let m2 = Matrix2::new(-k, Qf64::zero(), Qf64::one(), -i); - let x = DVector::new_random(n); - let mut y1 = DVector::new_random(n); - let mut y2 = y1.clone(); + let mut res: Matrix2 = Matrix2::zero(); + res.gemm(Qf64::one(), &m1, &m2, Qf64::zero()); + assert_eq!(res, Matrix2::identity()); - y1.gemv(alpha, &a, &x, beta); - y2.sygemv(alpha, &a.lower_triangle(), &x, beta); + let mut res: Matrix2 = Matrix2::identity(); + res.gemm(k, &m1, &m2, -k); + assert_eq!(res, Matrix2::zero()); +} - if !relative_eq!(y1, y2, epsilon = 1.0e-10) { - return false; +#[cfg(feature = "arbitrary")] +mod blas_quickcheck { + use na::{DMatrix, DVector}; + use std::cmp; + + quickcheck! { + /* + * + * Symmetric operators. 
+ * + */ + fn gemv_symm(n: usize, alpha: f64, beta: f64) -> bool { + let n = cmp::max(1, cmp::min(n, 50)); + let a = DMatrix::::new_random(n, n); + let a = &a * a.transpose(); + + let x = DVector::new_random(n); + let mut y1 = DVector::new_random(n); + let mut y2 = y1.clone(); + + y1.gemv(alpha, &a, &x, beta); + y2.sygemv(alpha, &a.lower_triangle(), &x, beta); + + if !relative_eq!(y1, y2, epsilon = 1.0e-10) { + return false; + } + + y1.gemv(alpha, &a, &x, 0.0); + y2.sygemv(alpha, &a.lower_triangle(), &x, 0.0); + + relative_eq!(y1, y2, epsilon = 1.0e-10) } - y1.gemv(alpha, &a, &x, 0.0); - y2.sygemv(alpha, &a.lower_triangle(), &x, 0.0); + fn gemv_tr(n: usize, alpha: f64, beta: f64) -> bool { + let n = cmp::max(1, cmp::min(n, 50)); + let a = DMatrix::::new_random(n, n); + let x = DVector::new_random(n); + let mut y1 = DVector::new_random(n); + let mut y2 = y1.clone(); - relative_eq!(y1, y2, epsilon = 1.0e-10) - } + y1.gemv(alpha, &a, &x, beta); + y2.gemv_tr(alpha, &a.transpose(), &x, beta); - fn gemv_tr(n: usize, alpha: f64, beta: f64) -> bool { - let n = cmp::max(1, cmp::min(n, 50)); - let a = DMatrix::::new_random(n, n); - let x = DVector::new_random(n); - let mut y1 = DVector::new_random(n); - let mut y2 = y1.clone(); + if !relative_eq!(y1, y2, epsilon = 1.0e-10) { + return false; + } - y1.gemv(alpha, &a, &x, beta); - y2.gemv_tr(alpha, &a.transpose(), &x, beta); + y1.gemv(alpha, &a, &x, 0.0); + y2.gemv_tr(alpha, &a.transpose(), &x, 0.0); - if !relative_eq!(y1, y2, epsilon = 1.0e-10) { - return false; + relative_eq!(y1, y2, epsilon = 1.0e-10) } - y1.gemv(alpha, &a, &x, 0.0); - y2.gemv_tr(alpha, &a.transpose(), &x, 0.0); + fn ger_symm(n: usize, alpha: f64, beta: f64) -> bool { + let n = cmp::max(1, cmp::min(n, 50)); + let a = DMatrix::::new_random(n, n); + let mut a1 = &a * a.transpose(); + let mut a2 = a1.lower_triangle(); - relative_eq!(y1, y2, epsilon = 1.0e-10) - } + let x = DVector::new_random(n); + let y = DVector::new_random(n); - fn ger_symm(n: usize, alpha: 
f64, beta: f64) -> bool { - let n = cmp::max(1, cmp::min(n, 50)); - let a = DMatrix::::new_random(n, n); - let mut a1 = &a * a.transpose(); - let mut a2 = a1.lower_triangle(); + a1.ger(alpha, &x, &y, beta); + a2.syger(alpha, &x, &y, beta); - let x = DVector::new_random(n); - let y = DVector::new_random(n); + if !relative_eq!(a1.lower_triangle(), a2) { + return false; + } - a1.ger(alpha, &x, &y, beta); - a2.syger(alpha, &x, &y, beta); + a1.ger(alpha, &x, &y, 0.0); + a2.syger(alpha, &x, &y, 0.0); - if !relative_eq!(a1.lower_triangle(), a2) { - return false; + relative_eq!(a1.lower_triangle(), a2) } - a1.ger(alpha, &x, &y, 0.0); - a2.syger(alpha, &x, &y, 0.0); + fn quadform(n: usize, alpha: f64, beta: f64) -> bool { + let n = cmp::max(1, cmp::min(n, 50)); + let rhs = DMatrix::::new_random(6, n); + let mid = DMatrix::::new_random(6, 6); + let mut res = DMatrix::new_random(n, n); - relative_eq!(a1.lower_triangle(), a2) - } + let expected = &res * beta + rhs.transpose() * &mid * &rhs * alpha; - fn quadform(n: usize, alpha: f64, beta: f64) -> bool { - let n = cmp::max(1, cmp::min(n, 50)); - let rhs = DMatrix::::new_random(6, n); - let mid = DMatrix::::new_random(6, 6); - let mut res = DMatrix::new_random(n, n); + res.quadform(alpha, &mid, &rhs, beta); - let expected = &res * beta + rhs.transpose() * &mid * &rhs * alpha; + println!("{}{}", res, expected); - res.quadform(alpha, &mid, &rhs, beta); + relative_eq!(res, expected, epsilon = 1.0e-7) + } - println!("{}{}", res, expected); + fn quadform_tr(n: usize, alpha: f64, beta: f64) -> bool { + let n = cmp::max(1, cmp::min(n, 50)); + let lhs = DMatrix::::new_random(6, n); + let mid = DMatrix::::new_random(n, n); + let mut res = DMatrix::new_random(6, 6); - relative_eq!(res, expected, epsilon = 1.0e-7) - } + let expected = &res * beta + &lhs * &mid * lhs.transpose() * alpha; - fn quadform_tr(n: usize, alpha: f64, beta: f64) -> bool { - let n = cmp::max(1, cmp::min(n, 50)); - let lhs = DMatrix::::new_random(6, n); - let mid = 
DMatrix::::new_random(n, n); - let mut res = DMatrix::new_random(6, 6); + res.quadform_tr(alpha, &lhs, &mid , beta); - let expected = &res * beta + &lhs * &mid * lhs.transpose() * alpha; + println!("{}{}", res, expected); - res.quadform_tr(alpha, &lhs, &mid , beta); - - println!("{}{}", res, expected); - - relative_eq!(res, expected, epsilon = 1.0e-7) + relative_eq!(res, expected, epsilon = 1.0e-7) + } } } From 4e25bd87fb91f691162d52db5c9f8145bf6b6bd9 Mon Sep 17 00:00:00 2001 From: Aaron Hill Date: Sun, 4 Aug 2019 11:20:19 -0400 Subject: [PATCH 36/67] Don't call 'offset' on a dangling pointer When creating a matrix with only one zero dimension, we end up with a matrix with a total size of zero, but a non-zero stride for elements. While such a matrix can never actually have any elements, we need to be careful with how we use the pointer associated with it. Since such a pointer will always be dangling, it can never be used with `ptr.offset`, which requires that the pointer be in-bounds or one passed the end of an allocation. Violating this results in undefined behavior. This commit adds in checks before the uses of `ptr.offset`. If we ever need to offset from a pointer when our actual allocation size is zero, we skip offsetting, and return the original pointer. This is fine because any actual use of the original or offsetted pointer would already be undefined behavior - we shoul never be trying to dereference the pointer associated with a zero-size matrix. This issue was caught be running `cargo miri test` on the project. --- src/base/iter.rs | 20 +++++++++++++++++++- src/base/storage.rs | 22 ++++++++++++++++++++-- 2 files changed, 39 insertions(+), 3 deletions(-) diff --git a/src/base/iter.rs b/src/base/iter.rs index 74e4f018..bad4e0be 100644 --- a/src/base/iter.rs +++ b/src/base/iter.rs @@ -27,12 +27,30 @@ macro_rules! 
iterator { let shape = storage.shape(); let strides = storage.strides(); let inner_offset = shape.0.value() * strides.0.value(); + let size = shape.0.value() * shape.1.value(); let ptr = storage.$ptr(); + // If we have a size of 0, 'ptr' must be + // dangling. Howver, 'inner_offset' might + // not be zero if only one dimension is zero, so + // we don't want to call 'offset'. + // This pointer will never actually get used + // if our size is '0', so it's fine to use + // 'ptr' for both the start and end. + let inner_end = if size == 0 { + ptr + } else { + // Safety: + // If 'size' is non-zero, we know that 'ptr' + // is not dangling, and 'inner_offset' must lie + // within the allocation + unsafe { ptr.offset(inner_offset as isize) } + }; + $Name { ptr: ptr, inner_ptr: ptr, - inner_end: unsafe { ptr.offset(inner_offset as isize) }, + inner_end, size: shape.0.value() * shape.1.value(), strides: strides, _phantoms: PhantomData, diff --git a/src/base/storage.rs b/src/base/storage.rs index 02941e47..80a6a2d8 100644 --- a/src/base/storage.rs +++ b/src/base/storage.rs @@ -72,7 +72,16 @@ pub unsafe trait Storage: Debug + Sized { /// Gets the address of the i-th matrix component without performing bound-checking. #[inline] unsafe fn get_address_unchecked_linear(&self, i: usize) -> *const N { - self.ptr().offset(i as isize) + let shape = self.shape(); + if shape.0.value() * shape.1.value() == 0 { + // If we have a zero-size matrix, our pointer must + // be dangling. Instead of calling 'offset', we + // just re-use our pointer, since actually using + // it would be undefined behavior + self.ptr() + } else { + self.ptr().offset(i as isize) + } } /// Gets the address of the i-th matrix component without performing bound-checking. @@ -124,7 +133,16 @@ pub unsafe trait StorageMut: Storage { /// Gets the mutable address of the i-th matrix component without performing bound-checking. 
#[inline] unsafe fn get_address_unchecked_linear_mut(&mut self, i: usize) -> *mut N { - self.ptr_mut().offset(i as isize) + let shape = self.shape(); + if shape.0.value() * shape.1.value() == 0 { + // If we have a zero-size matrix, our pointer must + // be dangling. Instead of calling 'offset', we + // just re-use our pointer, since actually using + // it would be undefined behavior + self.ptr_mut() + } else { + self.ptr_mut().offset(i as isize) + } } /// Gets the mutable address of the i-th matrix component without performing bound-checking. From fe65b1c129a16dfff13a1541f590260fcc939251 Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Mon, 4 Nov 2019 10:27:57 +0100 Subject: [PATCH 37/67] Add Vector::axcpy method The added method `Vector::axcpy` generalises `Vector::gemv` to noncommutative cases since it allows us to write for `gemv` `self.axcpy(alpha, &col2, val, beta)`, instead of the usual `self.axpy(alpha * val, &col2, beta)`. Hence, `axcpy` preserves the order of scalar multiplication which is important for applications where commutativity is not guaranteed (e.g., matrices of quaternions, etc.). This commit also removes helpers `array_axpy` and `array_ax`, and replaces them with `array_axcpy` and `array_axc` respectively, which like above preserve the order of scalar multiplication.
Finally, `Vector::axpy` is preserved, however, now expressed in terms of `Vector::axcpy` like so: ``` self.axcpy(alpha * val, &col2, beta) ``` --- src/base/blas.rs | 68 ++++++++++++++++++++++++++++++++---------------- 1 file changed, 46 insertions(+), 22 deletions(-) diff --git a/src/base/blas.rs b/src/base/blas.rs index dec28d07..622761fe 100644 --- a/src/base/blas.rs +++ b/src/base/blas.rs @@ -468,21 +468,21 @@ where N: Scalar + Zero + ClosedAdd + ClosedMul } } -fn array_axpy(y: &mut [N], a: N, x: &[N], beta: N, stride1: usize, stride2: usize, len: usize) +fn array_axcpy(y: &mut [N], a: N, x: &[N], c: N, beta: N, stride1: usize, stride2: usize, len: usize) where N: Scalar + Zero + ClosedAdd + ClosedMul { for i in 0..len { unsafe { let y = y.get_unchecked_mut(i * stride1); - *y = *x.get_unchecked(i * stride2) * a + *y * beta; + *y = a * *x.get_unchecked(i * stride2) * c + beta * *y; } } } -fn array_ax(y: &mut [N], a: N, x: &[N], stride1: usize, stride2: usize, len: usize) +fn array_axc(y: &mut [N], a: N, x: &[N], c: N, stride1: usize, stride2: usize, len: usize) where N: Scalar + Zero + ClosedAdd + ClosedMul { for i in 0..len { unsafe { - *y.get_unchecked_mut(i * stride1) = *x.get_unchecked(i * stride2) * a; + *y.get_unchecked_mut(i * stride1) = a * *x.get_unchecked(i * stride2) * c; } } } @@ -492,6 +492,40 @@ where N: Scalar + Zero + ClosedAdd + ClosedMul, S: StorageMut, { + /// Computes `self = a * x * c + b * self`. + /// + /// If `b` is zero, `self` is never read from. 
+ /// + /// # Examples: + /// + /// ``` + /// # use nalgebra::Vector3; + /// let mut vec1 = Vector3::new(1.0, 2.0, 3.0); + /// let vec2 = Vector3::new(0.1, 0.2, 0.3); + /// vec1.axcpy(5.0, &vec2, 2.0, 5.0); + /// assert_eq!(vec1, Vector3::new(6.0, 12.0, 18.0)); + /// ``` + #[inline] + pub fn axcpy(&mut self, a: N, x: &Vector, c: N, b: N) + where + SB: Storage, + ShapeConstraint: DimEq, + { + assert_eq!(self.nrows(), x.nrows(), "Axcpy: mismatched vector shapes."); + + let rstride1 = self.strides().0; + let rstride2 = x.strides().0; + + let y = self.data.as_mut_slice(); + let x = x.data.as_slice(); + + if !b.is_zero() { + array_axcpy(y, a, x, c, b, rstride1, rstride2, x.len()); + } else { + array_axc(y, a, x, c, rstride1, rstride2, x.len()); + } + } + /// Computes `self = a * x + b * self`. /// /// If `b` is zero, `self` is never read from. @@ -508,22 +542,12 @@ where #[inline] pub fn axpy(&mut self, a: N, x: &Vector, b: N) where + N: One, SB: Storage, ShapeConstraint: DimEq, { assert_eq!(self.nrows(), x.nrows(), "Axpy: mismatched vector shapes."); - - let rstride1 = self.strides().0; - let rstride2 = x.strides().0; - - let y = self.data.as_mut_slice(); - let x = x.data.as_slice(); - - if !b.is_zero() { - array_axpy(y, a, x, b, rstride1, rstride2, x.len()); - } else { - array_ax(y, a, x, rstride1, rstride2, x.len()); - } + self.axcpy(a, x, N::one(), b) } /// Computes `self = alpha * a * x + beta * self`, where `a` is a matrix, `x` a vector, and @@ -579,13 +603,13 @@ where // FIXME: avoid bound checks. let col2 = a.column(0); let val = unsafe { *x.vget_unchecked(0) }; - self.axpy(val * alpha, &col2, beta); + self.axcpy(alpha, &col2, val, beta); for j in 1..ncols2 { let col2 = a.column(j); let val = unsafe { *x.vget_unchecked(j) }; - self.axpy(val * alpha, &col2, N::one()); + self.axcpy(alpha, &col2, val, N::one()); } } @@ -624,7 +648,7 @@ where // FIXME: avoid bound checks. 
let col2 = a.column(0); let val = unsafe { *x.vget_unchecked(0) }; - self.axpy(val * alpha, &col2, beta); + self.axpy(alpha * val, &col2, beta); self[0] += alpha * dot(&a.slice_range(1.., 0), &x.rows_range(1..)); for j in 1..dim2 { @@ -637,7 +661,7 @@ where *self.vget_unchecked_mut(j) += alpha * dot; } self.rows_range_mut(j + 1..) - .axpy(val * alpha, &col2.rows_range(j + 1..), N::one()); + .axpy(alpha * val, &col2.rows_range(j + 1..), N::one()); } } @@ -890,7 +914,7 @@ where N: Scalar + Zero + ClosedAdd + ClosedMul for j in 0..ncols1 { // FIXME: avoid bound checks. let val = unsafe { conjugate(*y.vget_unchecked(j)) }; - self.column_mut(j).axpy(val * alpha, x, beta); + self.column_mut(j).axpy(alpha * val, x, beta); } } @@ -1256,7 +1280,7 @@ where N: Scalar + Zero + ClosedAdd + ClosedMul let subdim = Dynamic::new(dim1 - j); // FIXME: avoid bound checks. self.generic_slice_mut((j, j), (subdim, U1)).axpy( - val * alpha, + alpha * val, &x.rows_range(j..), beta, ); From 7d9901547307c0073654da587f4ce0aa3dbb1ef1 Mon Sep 17 00:00:00 2001 From: Avi Weinstock Date: Tue, 19 Nov 2019 15:57:37 -0500 Subject: [PATCH 38/67] Move `Copy` constraint from the definition of `Scalar` to all its use-sites. This should semantically be a no-op, but enables refactorings to use non-Copy scalars on a case-by-case basis. Also, the only instance of a `One + Zero` trait bound was changed into a `Zero + One` bound to match the others. 
The following sed scripts were used in the refactoring (with each clause added to reduce the error count of `cargo check`): ```bash export RELEVANT_SOURCEFILES="$(find src -name '*.rs') $(find examples -name '*.rs')" for f in $RELEVANT_SOURCEFILES; do sed -i 's/N: Scalar,/N: Scalar+Copy,/' $f; done for f in $RELEVANT_SOURCEFILES; do sed -i 's/N: Scalar + Field/N: Scalar + Copy + Field/' $f; done for f in $RELEVANT_SOURCEFILES; do sed -i 's/N: Scalar + Zero/N: Scalar + Copy + Zero/' $f; done for f in $RELEVANT_SOURCEFILES; do sed -i 's/N: Scalar + Closed/N: Scalar + Copy + Closed/' $f; done for f in $RELEVANT_SOURCEFILES; do sed -i 's/N: Scalar + Eq/N: Scalar + Copy + Eq/' $f; done for f in $RELEVANT_SOURCEFILES; do sed -i 's/N: Scalar + PartialOrd/N: Scalar + Copy + PartialOrd/' $f; done for f in $RELEVANT_SOURCEFILES; do sed -i 's/N: *Scalar + Zero/N: Scalar + Copy + Zero/' $f; done for f in $RELEVANT_SOURCEFILES; do sed -i 's/N: Scalar + PartialEq/N: Scalar + Copy + PartialEq/' $f; done for f in $RELEVANT_SOURCEFILES; do sed -i 's/N: Scalar>/N: Scalar+Copy>/' $f; done for f in $RELEVANT_SOURCEFILES; do sed -i 's/N: Scalar + $bound/N: Scalar + Copy + $bound/' $f; done for f in $RELEVANT_SOURCEFILES; do sed -i 's/N: *Scalar + $bound/N: Scalar + Copy + $bound/' $f; done for f in $RELEVANT_SOURCEFILES; do sed -i 's/N\([0-9]\): *Scalar,/N\1: Scalar+Copy,/' $f; done for f in $RELEVANT_SOURCEFILES; do sed -i 's/N: *Scalar + $trait/N: Scalar + Copy + $trait/' $f; done for f in $RELEVANT_SOURCEFILES; do sed -i 's/N\([0-9]\): *Scalar + Superset/N\1: Scalar + Copy + Superset/' $f; done for f in $RELEVANT_SOURCEFILES; do sed -i 's/N\([0-9]\): *Scalar + \([a-zA-Z]*Eq\)/N\1: Scalar + Copy + \2/' $f; done for f in $RELEVANT_SOURCEFILES; do sed -i 's/N\([0-9]\?\): *Scalar + \([a-zA-Z]*Eq\)/N\1: Scalar + Copy + \2/' $f; done for f in $RELEVANT_SOURCEFILES; do sed -i 's/N\([0-9]\?\): *Scalar + \(hash::\)/N\1: Scalar + Copy + \2/' $f; done for f in $RELEVANT_SOURCEFILES; do sed -i 
's/N\([0-9]\?\): *Scalar {/N\1: Scalar + Copy {/' $f; done for f in $RELEVANT_SOURCEFILES; do sed -i 's/N\([0-9]\?\): *Scalar + \(Zero\)/N\1: Scalar + Copy + \2/' $f; done for f in $RELEVANT_SOURCEFILES; do sed -i 's/N\([0-9]\?\): *Scalar + \(Bounded\)/N\1: Scalar + Copy + \2/' $f; done for f in $RELEVANT_SOURCEFILES; do sed -i 's/N\([0-9]\?\): *Scalar + \(Lattice\)/N\1: Scalar + Copy + \2/' $f; done for f in $RELEVANT_SOURCEFILES; do sed -i 's/N\([0-9]\?\): *Scalar + \(Meet\|Join\)/N\1: Scalar + Copy + \2/' $f; done for f in $RELEVANT_SOURCEFILES; do sed -i 's/N\([0-9]\?\): *Scalar + \(fmt::\)/N\1: Scalar + Copy + \2/' $f; done for f in $RELEVANT_SOURCEFILES; do sed -i 's/N\([0-9]\?\): *Scalar + \(Ring\)/N\1: Scalar + Copy + \2/' $f; done for f in $RELEVANT_SOURCEFILES; do sed -i 's/N\([0-9]\?\): *Scalar + \(Hash\)/N\1: Scalar + Copy + \2/' $f; done for f in $RELEVANT_SOURCEFILES; do sed -i 's/N\([0-9]\?\): *Scalar + \(Send\|Sync\)/N\1: Scalar + Copy + \2/' $f; done for f in $RELEVANT_SOURCEFILES; do sed -i 's/One + Zero/Zero + One/' $f; done for f in $RELEVANT_SOURCEFILES; do sed -i 's/N\([0-9]\?\): *Scalar + \(Zero\)/N\1: Scalar + Copy + \2/' $f; done for f in $RELEVANT_SOURCEFILES; do sed -i 's/N\([0-9]\?\): *Scalar + \($marker\)/N\1: Scalar + Copy + \2/' $f; done for f in $RELEVANT_SOURCEFILES; do sed -i 's/N\([0-9]\?\): *Scalar>/N\1: Scalar + Copy>/' $f; done for f in $RELEVANT_SOURCEFILES; do sed -i 's/Scalar+Copy/Scalar + Copy/' $f; done ``` --- examples/scalar_genericity.rs | 4 +- src/base/allocator.rs | 12 ++-- src/base/array_storage.rs | 10 +-- src/base/blas.rs | 18 ++--- src/base/cg.rs | 6 +- src/base/componentwise.rs | 6 +- src/base/construction.rs | 22 +++--- src/base/construction_slice.rs | 16 ++--- src/base/conversion.rs | 54 +++++++------- src/base/coordinates.rs | 6 +- src/base/default_allocator.rs | 20 +++--- src/base/edition.rs | 24 +++---- src/base/indexing.rs | 18 ++--- src/base/iter.rs | 40 +++++------ src/base/matrix.rs | 90 
++++++++++++------------ src/base/matrix_alga.rs | 28 ++++---- src/base/matrix_slice.rs | 40 +++++------ src/base/ops.rs | 64 ++++++++--------- src/base/properties.rs | 2 +- src/base/scalar.rs | 2 +- src/base/statistics.rs | 4 +- src/base/storage.rs | 8 +-- src/base/swizzle.rs | 2 +- src/base/vec_storage.rs | 18 ++--- src/debug/random_orthogonal.rs | 2 +- src/debug/random_sdp.rs | 2 +- src/geometry/op_macros.rs | 8 +-- src/geometry/perspective.rs | 2 +- src/geometry/point.rs | 28 ++++---- src/geometry/point_alga.rs | 10 +-- src/geometry/point_construction.rs | 12 ++-- src/geometry/point_conversion.rs | 20 +++--- src/geometry/point_coordinates.rs | 4 +- src/geometry/point_ops.rs | 18 ++--- src/geometry/reflection.rs | 2 +- src/geometry/rotation.rs | 26 +++---- src/geometry/rotation_construction.rs | 4 +- src/geometry/rotation_ops.rs | 2 +- src/geometry/swizzle.rs | 2 +- src/geometry/translation.rs | 30 ++++---- src/geometry/translation_construction.rs | 8 +-- src/geometry/translation_conversion.rs | 8 +-- src/geometry/translation_coordinates.rs | 4 +- src/linalg/lu.rs | 4 +- src/linalg/permutation_sequence.rs | 8 +-- src/sparse/cs_matrix.rs | 28 ++++---- src/sparse/cs_matrix_conversion.rs | 8 +-- src/sparse/cs_matrix_ops.rs | 12 ++-- 48 files changed, 383 insertions(+), 383 deletions(-) diff --git a/examples/scalar_genericity.rs b/examples/scalar_genericity.rs index 75f6f9d4..18b5c52a 100644 --- a/examples/scalar_genericity.rs +++ b/examples/scalar_genericity.rs @@ -4,11 +4,11 @@ extern crate nalgebra as na; use alga::general::{RealField, RingCommutative}; use na::{Scalar, Vector3}; -fn print_vector(m: &Vector3) { +fn print_vector(m: &Vector3) { println!("{:?}", m) } -fn print_squared_norm(v: &Vector3) { +fn print_squared_norm(v: &Vector3) { // NOTE: alternatively, nalgebra already defines `v.squared_norm()`. 
let sqnorm = v.dot(v); println!("{:?}", sqnorm); diff --git a/src/base/allocator.rs b/src/base/allocator.rs index 0ad30981..246f3620 100644 --- a/src/base/allocator.rs +++ b/src/base/allocator.rs @@ -16,7 +16,7 @@ use crate::base::{DefaultAllocator, Scalar}; /// /// Every allocator must be both static and dynamic. Though not all implementations may share the /// same `Buffer` type. -pub trait Allocator: Any + Sized { +pub trait Allocator: Any + Sized { /// The type of buffer this allocator can instanciate. type Buffer: ContiguousStorageMut + Clone; @@ -33,7 +33,7 @@ pub trait Allocator: Any + Sized { /// A matrix reallocator. Changes the size of the memory buffer that initially contains (RFrom × /// CFrom) elements to a smaller or larger size (RTo, CTo). -pub trait Reallocator: +pub trait Reallocator: Allocator + Allocator { /// Reallocates a buffer of shape `(RTo, CTo)`, possibly reusing a previously allocated buffer @@ -65,7 +65,7 @@ where R2: Dim, C1: Dim, C2: Dim, - N: Scalar, + N: Scalar + Copy, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { } @@ -76,7 +76,7 @@ where R2: Dim, C1: Dim, C2: Dim, - N: Scalar, + N: Scalar + Copy, DefaultAllocator: Allocator + Allocator, SameShapeC>, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, {} @@ -88,7 +88,7 @@ pub trait SameShapeVectorAllocator: where R1: Dim, R2: Dim, - N: Scalar, + N: Scalar + Copy, ShapeConstraint: SameNumberOfRows, { } @@ -97,7 +97,7 @@ impl SameShapeVectorAllocator for DefaultAllocator where R1: Dim, R2: Dim, - N: Scalar, + N: Scalar + Copy, DefaultAllocator: Allocator + Allocator>, ShapeConstraint: SameNumberOfRows, {} diff --git a/src/base/array_storage.rs b/src/base/array_storage.rs index bebb8740..38144d8a 100644 --- a/src/base/array_storage.rs +++ b/src/base/array_storage.rs @@ -154,7 +154,7 @@ where unsafe impl Storage for ArrayStorage where - N: Scalar, + N: Scalar + Copy, R: DimName, C: DimName, R::Value: Mul, @@ -206,7 +206,7 @@ where unsafe impl StorageMut for 
ArrayStorage where - N: Scalar, + N: Scalar + Copy, R: DimName, C: DimName, R::Value: Mul, @@ -226,7 +226,7 @@ where unsafe impl ContiguousStorage for ArrayStorage where - N: Scalar, + N: Scalar + Copy, R: DimName, C: DimName, R::Value: Mul, @@ -236,7 +236,7 @@ where unsafe impl ContiguousStorageMut for ArrayStorage where - N: Scalar, + N: Scalar + Copy, R: DimName, C: DimName, R::Value: Mul, @@ -295,7 +295,7 @@ struct ArrayStorageVisitor { #[cfg(feature = "serde-serialize")] impl ArrayStorageVisitor where - N: Scalar, + N: Scalar + Copy, R: DimName, C: DimName, R::Value: Mul, diff --git a/src/base/blas.rs b/src/base/blas.rs index 622761fe..a999b6eb 100644 --- a/src/base/blas.rs +++ b/src/base/blas.rs @@ -48,7 +48,7 @@ impl> Vector { } } -impl> Vector { +impl> Vector { /// Computes the index and value of the vector component with the largest value. /// /// # Examples: @@ -230,7 +230,7 @@ impl> Matrix { } -impl> Matrix { +impl> Matrix { /// Computes the index of the matrix component with the largest absolute value. 
/// /// # Examples: @@ -264,7 +264,7 @@ impl> Matri } impl> Matrix -where N: Scalar + Zero + ClosedAdd + ClosedMul +where N: Scalar + Copy + Zero + ClosedAdd + ClosedMul { #[inline(always)] fn dotx(&self, rhs: &Matrix, conjugate: impl Fn(N) -> N) -> N @@ -469,7 +469,7 @@ where N: Scalar + Zero + ClosedAdd + ClosedMul } fn array_axcpy(y: &mut [N], a: N, x: &[N], c: N, beta: N, stride1: usize, stride2: usize, len: usize) -where N: Scalar + Zero + ClosedAdd + ClosedMul { +where N: Scalar + Copy + Zero + ClosedAdd + ClosedMul { for i in 0..len { unsafe { let y = y.get_unchecked_mut(i * stride1); @@ -479,7 +479,7 @@ where N: Scalar + Zero + ClosedAdd + ClosedMul { } fn array_axc(y: &mut [N], a: N, x: &[N], c: N, stride1: usize, stride2: usize, len: usize) -where N: Scalar + Zero + ClosedAdd + ClosedMul { +where N: Scalar + Copy + Zero + ClosedAdd + ClosedMul { for i in 0..len { unsafe { *y.get_unchecked_mut(i * stride1) = a * *x.get_unchecked(i * stride2) * c; @@ -489,7 +489,7 @@ where N: Scalar + Zero + ClosedAdd + ClosedMul { impl Vector where - N: Scalar + Zero + ClosedAdd + ClosedMul, + N: Scalar + Copy + Zero + ClosedAdd + ClosedMul, S: StorageMut, { /// Computes `self = a * x * c + b * self`. @@ -886,7 +886,7 @@ where } impl> Matrix -where N: Scalar + Zero + ClosedAdd + ClosedMul +where N: Scalar + Copy + Zero + ClosedAdd + ClosedMul { #[inline(always)] fn gerx( @@ -1249,7 +1249,7 @@ where N: Scalar + Zero + ClosedAdd + ClosedMul } impl> Matrix -where N: Scalar + Zero + ClosedAdd + ClosedMul +where N: Scalar + Copy + Zero + ClosedAdd + ClosedMul { #[inline(always)] fn xxgerx( @@ -1396,7 +1396,7 @@ where N: Scalar + Zero + ClosedAdd + ClosedMul } impl> SquareMatrix -where N: Scalar + Zero + One + ClosedAdd + ClosedMul +where N: Scalar + Copy + Zero + One + ClosedAdd + ClosedMul { /// Computes the quadratic form `self = alpha * lhs * mid * lhs.transpose() + beta * self`. 
/// diff --git a/src/base/cg.rs b/src/base/cg.rs index 0822ea27..5908c111 100644 --- a/src/base/cg.rs +++ b/src/base/cg.rs @@ -23,7 +23,7 @@ use alga::linear::Transformation; impl MatrixN where - N: Scalar + Ring, + N: Scalar + Copy + Ring, DefaultAllocator: Allocator, { /// Creates a new homogeneous matrix that applies the same scaling factor on each dimension. @@ -153,7 +153,7 @@ impl Matrix4 { } } -impl> SquareMatrix { +impl> SquareMatrix { /// Computes the transformation equal to `self` followed by an uniform scaling factor. #[inline] pub fn append_scaling(&self, scaling: N) -> MatrixN @@ -240,7 +240,7 @@ impl> SquareMatrix { } } -impl> SquareMatrix { +impl> SquareMatrix { /// Computes in-place the transformation equal to `self` followed by an uniform scaling factor. #[inline] pub fn append_scaling_mut(&mut self, scaling: N) diff --git a/src/base/componentwise.rs b/src/base/componentwise.rs index e5f4d7ec..5fadcf36 100644 --- a/src/base/componentwise.rs +++ b/src/base/componentwise.rs @@ -14,7 +14,7 @@ use crate::base::{DefaultAllocator, Matrix, MatrixMN, MatrixSum, Scalar}; /// The type of the result of a matrix component-wise operation. pub type MatrixComponentOp = MatrixSum; -impl> Matrix { +impl> Matrix { /// Computes the component-wise absolute value. /// /// # Example @@ -45,7 +45,7 @@ impl> Matrix { macro_rules! component_binop_impl( ($($binop: ident, $binop_mut: ident, $binop_assign: ident, $cmpy: ident, $Trait: ident . $op: ident . $op_assign: ident, $desc:expr, $desc_cmpy:expr, $desc_mut:expr);* $(;)*) => {$( - impl> Matrix { + impl> Matrix { #[doc = $desc] #[inline] pub fn $binop(&self, rhs: &Matrix) -> MatrixComponentOp @@ -70,7 +70,7 @@ macro_rules! component_binop_impl( } } - impl> Matrix { + impl> Matrix { // componentwise binop plus Y. 
#[doc = $desc_cmpy] #[inline] diff --git a/src/base/construction.rs b/src/base/construction.rs index 228ce114..c28c043b 100644 --- a/src/base/construction.rs +++ b/src/base/construction.rs @@ -27,7 +27,7 @@ use crate::base::{DefaultAllocator, Matrix, MatrixMN, MatrixN, Scalar, Unit, Vec * Generic constructors. * */ -impl MatrixMN +impl MatrixMN where DefaultAllocator: Allocator { /// Creates a new uninitialized matrix. If the matrix has a compile-time dimension, this panics @@ -286,7 +286,7 @@ where DefaultAllocator: Allocator impl MatrixN where - N: Scalar, + N: Scalar + Copy, DefaultAllocator: Allocator, { /// Creates a square matrix with its diagonal set to `diag` and all other entries set to 0. @@ -330,7 +330,7 @@ where */ macro_rules! impl_constructors( ($($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => { - impl MatrixMN + impl MatrixMN where DefaultAllocator: Allocator { /// Creates a new uninitialized matrix or vector. @@ -559,7 +559,7 @@ macro_rules! impl_constructors( } } - impl MatrixMN + impl MatrixMN where DefaultAllocator: Allocator, Standard: Distribution { @@ -603,7 +603,7 @@ impl_constructors!(Dynamic, Dynamic; */ macro_rules! impl_constructors_from_data( ($data: ident; $($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => { - impl MatrixMN + impl MatrixMN where DefaultAllocator: Allocator { /// Creates a matrix with its elements filled with the components provided by a slice /// in row-major order. 
@@ -721,7 +721,7 @@ impl_constructors_from_data!(data; Dynamic, Dynamic; */ impl Zero for MatrixMN where - N: Scalar + Zero + ClosedAdd, + N: Scalar + Copy + Zero + ClosedAdd, DefaultAllocator: Allocator, { #[inline] @@ -737,7 +737,7 @@ where impl One for MatrixN where - N: Scalar + Zero + One + ClosedMul + ClosedAdd, + N: Scalar + Copy + Zero + One + ClosedMul + ClosedAdd, DefaultAllocator: Allocator, { #[inline] @@ -748,7 +748,7 @@ where impl Bounded for MatrixMN where - N: Scalar + Bounded, + N: Scalar + Copy + Bounded, DefaultAllocator: Allocator, { #[inline] @@ -762,7 +762,7 @@ where } } -impl Distribution> for Standard +impl Distribution> for Standard where DefaultAllocator: Allocator, Standard: Distribution, @@ -822,7 +822,7 @@ where macro_rules! componentwise_constructors_impl( ($($R: ty, $C: ty, $($args: ident:($irow: expr,$icol: expr)),*);* $(;)*) => {$( impl MatrixMN - where N: Scalar, + where N: Scalar + Copy, DefaultAllocator: Allocator { /// Initializes this matrix from its components. #[inline] @@ -990,7 +990,7 @@ componentwise_constructors_impl!( */ impl VectorN where - N: Scalar + Zero + One, + N: Scalar + Copy + Zero + One, DefaultAllocator: Allocator, { /// The column vector with a 1 as its first component, and zero elsewhere. diff --git a/src/base/construction_slice.rs b/src/base/construction_slice.rs index 4f745a65..029abc69 100644 --- a/src/base/construction_slice.rs +++ b/src/base/construction_slice.rs @@ -8,7 +8,7 @@ use num_rational::Ratio; * Slice constructors. * */ -impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> +impl<'a, N: Scalar + Copy, R: Dim, C: Dim, RStride: Dim, CStride: Dim> MatrixSliceMN<'a, N, R, C, RStride, CStride> { /// Creates, without bound-checking, a matrix slice from an array and with dimensions and strides specified by generic types instances. 
@@ -61,7 +61,7 @@ impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> } } -impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> +impl<'a, N: Scalar + Copy, R: Dim, C: Dim, RStride: Dim, CStride: Dim> MatrixSliceMutMN<'a, N, R, C, RStride, CStride> { /// Creates, without bound-checking, a mutable matrix slice from an array and with dimensions and strides specified by generic types instances. @@ -133,7 +133,7 @@ impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> } } -impl<'a, N: Scalar, R: Dim, C: Dim> MatrixSliceMN<'a, N, R, C> { +impl<'a, N: Scalar + Copy, R: Dim, C: Dim> MatrixSliceMN<'a, N, R, C> { /// Creates, without bound-checking, a matrix slice from an array and with dimensions specified by generic types instances. /// /// This method is unsafe because the input data array is not checked to contain enough elements. @@ -159,7 +159,7 @@ impl<'a, N: Scalar, R: Dim, C: Dim> MatrixSliceMN<'a, N, R, C> { } } -impl<'a, N: Scalar, R: Dim, C: Dim> MatrixSliceMutMN<'a, N, R, C> { +impl<'a, N: Scalar + Copy, R: Dim, C: Dim> MatrixSliceMutMN<'a, N, R, C> { /// Creates, without bound-checking, a mutable matrix slice from an array and with dimensions specified by generic types instances. /// /// This method is unsafe because the input data array is not checked to contain enough elements. @@ -187,7 +187,7 @@ impl<'a, N: Scalar, R: Dim, C: Dim> MatrixSliceMutMN<'a, N, R, C> { macro_rules! impl_constructors( ($($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => { - impl<'a, N: Scalar, $($DimIdent: $DimBound),*> MatrixSliceMN<'a, N, $($Dims),*> { + impl<'a, N: Scalar + Copy, $($DimIdent: $DimBound),*> MatrixSliceMN<'a, N, $($Dims),*> { /// Creates a new matrix slice from the given data array. /// /// Panics if `data` does not contain enough elements. @@ -203,7 +203,7 @@ macro_rules! 
impl_constructors( } } - impl<'a, N: Scalar, $($DimIdent: $DimBound, )*> MatrixSliceMN<'a, N, $($Dims,)* Dynamic, Dynamic> { + impl<'a, N: Scalar + Copy, $($DimIdent: $DimBound, )*> MatrixSliceMN<'a, N, $($Dims,)* Dynamic, Dynamic> { /// Creates a new matrix slice with the specified strides from the given data array. /// /// Panics if `data` does not contain enough elements. @@ -244,7 +244,7 @@ impl_constructors!(Dynamic, Dynamic; macro_rules! impl_constructors_mut( ($($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => { - impl<'a, N: Scalar, $($DimIdent: $DimBound),*> MatrixSliceMutMN<'a, N, $($Dims),*> { + impl<'a, N: Scalar + Copy, $($DimIdent: $DimBound),*> MatrixSliceMutMN<'a, N, $($Dims),*> { /// Creates a new mutable matrix slice from the given data array. /// /// Panics if `data` does not contain enough elements. @@ -260,7 +260,7 @@ macro_rules! impl_constructors_mut( } } - impl<'a, N: Scalar, $($DimIdent: $DimBound, )*> MatrixSliceMutMN<'a, N, $($Dims,)* Dynamic, Dynamic> { + impl<'a, N: Scalar + Copy, $($DimIdent: $DimBound, )*> MatrixSliceMutMN<'a, N, $($Dims,)* Dynamic, Dynamic> { /// Creates a new mutable matrix slice with the specified strides from the given data array. /// /// Panics if `data` does not contain enough elements. 
diff --git a/src/base/conversion.rs b/src/base/conversion.rs index 7763a086..883f3fb8 100644 --- a/src/base/conversion.rs +++ b/src/base/conversion.rs @@ -31,8 +31,8 @@ where C1: Dim, R2: Dim, C2: Dim, - N1: Scalar, - N2: Scalar + SupersetOf, + N1: Scalar + Copy, + N2: Scalar + Copy + SupersetOf, DefaultAllocator: Allocator + Allocator + SameShapeAllocator, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, @@ -75,7 +75,7 @@ where } } -impl<'a, N: Scalar, R: Dim, C: Dim, S: Storage> IntoIterator for &'a Matrix { +impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: Storage> IntoIterator for &'a Matrix { type Item = &'a N; type IntoIter = MatrixIter<'a, N, R, C, S>; @@ -85,7 +85,7 @@ impl<'a, N: Scalar, R: Dim, C: Dim, S: Storage> IntoIterator for &'a Ma } } -impl<'a, N: Scalar, R: Dim, C: Dim, S: StorageMut> IntoIterator +impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: StorageMut> IntoIterator for &'a mut Matrix { type Item = &'a mut N; @@ -100,7 +100,7 @@ impl<'a, N: Scalar, R: Dim, C: Dim, S: StorageMut> IntoIterator macro_rules! impl_from_into_asref_1D( ($(($NRows: ident, $NCols: ident) => $SZ: expr);* $(;)*) => {$( impl From<[N; $SZ]> for MatrixMN - where N: Scalar, + where N: Scalar + Copy, DefaultAllocator: Allocator { #[inline] fn from(arr: [N; $SZ]) -> Self { @@ -114,7 +114,7 @@ macro_rules! impl_from_into_asref_1D( } impl Into<[N; $SZ]> for Matrix - where N: Scalar, + where N: Scalar + Copy, S: ContiguousStorage { #[inline] fn into(self) -> [N; $SZ] { @@ -128,7 +128,7 @@ macro_rules! impl_from_into_asref_1D( } impl AsRef<[N; $SZ]> for Matrix - where N: Scalar, + where N: Scalar + Copy, S: ContiguousStorage { #[inline] fn as_ref(&self) -> &[N; $SZ] { @@ -139,7 +139,7 @@ macro_rules! impl_from_into_asref_1D( } impl AsMut<[N; $SZ]> for Matrix - where N: Scalar, + where N: Scalar + Copy, S: ContiguousStorageMut { #[inline] fn as_mut(&mut self) -> &mut [N; $SZ] { @@ -168,7 +168,7 @@ impl_from_into_asref_1D!( macro_rules! 
impl_from_into_asref_2D( ($(($NRows: ty, $NCols: ty) => ($SZRows: expr, $SZCols: expr));* $(;)*) => {$( - impl From<[[N; $SZRows]; $SZCols]> for MatrixMN + impl From<[[N; $SZRows]; $SZCols]> for MatrixMN where DefaultAllocator: Allocator { #[inline] fn from(arr: [[N; $SZRows]; $SZCols]) -> Self { @@ -181,7 +181,7 @@ macro_rules! impl_from_into_asref_2D( } } - impl Into<[[N; $SZRows]; $SZCols]> for Matrix + impl Into<[[N; $SZRows]; $SZCols]> for Matrix where S: ContiguousStorage { #[inline] fn into(self) -> [[N; $SZRows]; $SZCols] { @@ -194,7 +194,7 @@ macro_rules! impl_from_into_asref_2D( } } - impl AsRef<[[N; $SZRows]; $SZCols]> for Matrix + impl AsRef<[[N; $SZRows]; $SZCols]> for Matrix where S: ContiguousStorage { #[inline] fn as_ref(&self) -> &[[N; $SZRows]; $SZCols] { @@ -204,7 +204,7 @@ macro_rules! impl_from_into_asref_2D( } } - impl AsMut<[[N; $SZRows]; $SZCols]> for Matrix + impl AsMut<[[N; $SZRows]; $SZCols]> for Matrix where S: ContiguousStorageMut { #[inline] fn as_mut(&mut self) -> &mut [[N; $SZRows]; $SZCols] { @@ -229,7 +229,7 @@ impl_from_into_asref_2D!( macro_rules! impl_from_into_mint_1D( ($($NRows: ident => $VT:ident [$SZ: expr]);* $(;)*) => {$( impl From> for MatrixMN - where N: Scalar, + where N: Scalar + Copy, DefaultAllocator: Allocator { #[inline] fn from(v: mint::$VT) -> Self { @@ -243,7 +243,7 @@ macro_rules! impl_from_into_mint_1D( } impl Into> for Matrix - where N: Scalar, + where N: Scalar + Copy, S: ContiguousStorage { #[inline] fn into(self) -> mint::$VT { @@ -257,7 +257,7 @@ macro_rules! impl_from_into_mint_1D( } impl AsRef> for Matrix - where N: Scalar, + where N: Scalar + Copy, S: ContiguousStorage { #[inline] fn as_ref(&self) -> &mint::$VT { @@ -268,7 +268,7 @@ macro_rules! impl_from_into_mint_1D( } impl AsMut> for Matrix - where N: Scalar, + where N: Scalar + Copy, S: ContiguousStorageMut { #[inline] fn as_mut(&mut self) -> &mut mint::$VT { @@ -292,7 +292,7 @@ impl_from_into_mint_1D!( macro_rules! 
impl_from_into_mint_2D( ($(($NRows: ty, $NCols: ty) => $MV:ident{ $($component:ident),* }[$SZRows: expr]);* $(;)*) => {$( impl From> for MatrixMN - where N: Scalar, + where N: Scalar + Copy, DefaultAllocator: Allocator { #[inline] fn from(m: mint::$MV) -> Self { @@ -310,7 +310,7 @@ macro_rules! impl_from_into_mint_2D( } impl Into> for MatrixMN - where N: Scalar, + where N: Scalar + Copy, DefaultAllocator: Allocator { #[inline] fn into(self) -> mint::$MV { @@ -342,7 +342,7 @@ impl_from_into_mint_2D!( impl<'a, N, R, C, RStride, CStride> From> for Matrix> where - N: Scalar, + N: Scalar + Copy, R: DimName, C: DimName, RStride: Dim, @@ -359,7 +359,7 @@ where impl<'a, N, C, RStride, CStride> From> for Matrix> where - N: Scalar, + N: Scalar + Copy, C: Dim, RStride: Dim, CStride: Dim, @@ -373,7 +373,7 @@ where impl<'a, N, R, RStride, CStride> From> for Matrix> where - N: Scalar, + N: Scalar + Copy, R: DimName, RStride: Dim, CStride: Dim, @@ -386,7 +386,7 @@ where impl<'a, N, R, C, RStride, CStride> From> for Matrix> where - N: Scalar, + N: Scalar + Copy, R: DimName, C: DimName, RStride: Dim, @@ -403,7 +403,7 @@ where impl<'a, N, C, RStride, CStride> From> for Matrix> where - N: Scalar, + N: Scalar + Copy, C: Dim, RStride: Dim, CStride: Dim, @@ -417,7 +417,7 @@ where impl<'a, N, R, RStride, CStride> From> for Matrix> where - N: Scalar, + N: Scalar + Copy, R: DimName, RStride: Dim, CStride: Dim, @@ -430,7 +430,7 @@ where impl<'a, N, R, C, RSlice, CSlice, RStride, CStride, S> From<&'a Matrix> for MatrixSlice<'a, N, RSlice, CSlice, RStride, CStride> where - N: Scalar, + N: Scalar + Copy, R: Dim, C: Dim, RSlice: Dim, @@ -463,7 +463,7 @@ for MatrixSlice<'a, N, RSlice, CSlice, RStride, CStride> impl<'a, N, R, C, RSlice, CSlice, RStride, CStride, S> From<&'a mut Matrix> for MatrixSlice<'a, N, RSlice, CSlice, RStride, CStride> where - N: Scalar, + N: Scalar + Copy, R: Dim, C: Dim, RSlice: Dim, @@ -496,7 +496,7 @@ for MatrixSlice<'a, N, RSlice, CSlice, RStride, CStride> impl<'a, N, 
R, C, RSlice, CSlice, RStride, CStride, S> From<&'a mut Matrix> for MatrixSliceMut<'a, N, RSlice, CSlice, RStride, CStride> where - N: Scalar, + N: Scalar + Copy, R: Dim, C: Dim, RSlice: Dim, diff --git a/src/base/coordinates.rs b/src/base/coordinates.rs index 832723e3..38c7edac 100644 --- a/src/base/coordinates.rs +++ b/src/base/coordinates.rs @@ -24,7 +24,7 @@ macro_rules! coords_impl( #[repr(C)] #[derive(Eq, PartialEq, Clone, Hash, Debug, Copy)] #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] - pub struct $T { + pub struct $T { $(pub $comps: N),* } } @@ -32,7 +32,7 @@ macro_rules! coords_impl( macro_rules! deref_impl( ($R: ty, $C: ty; $Target: ident) => { - impl Deref for Matrix + impl Deref for Matrix where S: ContiguousStorage { type Target = $Target; @@ -42,7 +42,7 @@ macro_rules! deref_impl( } } - impl DerefMut for Matrix + impl DerefMut for Matrix where S: ContiguousStorageMut { #[inline] fn deref_mut(&mut self) -> &mut Self::Target { diff --git a/src/base/default_allocator.rs b/src/base/default_allocator.rs index c07c8708..ee6786c5 100644 --- a/src/base/default_allocator.rs +++ b/src/base/default_allocator.rs @@ -36,7 +36,7 @@ pub struct DefaultAllocator; // Static - Static impl Allocator for DefaultAllocator where - N: Scalar, + N: Scalar + Copy, R: DimName, C: DimName, R::Value: Mul, @@ -76,7 +76,7 @@ where // Dynamic - Static // Dynamic - Dynamic #[cfg(any(feature = "std", feature = "alloc"))] -impl Allocator for DefaultAllocator { +impl Allocator for DefaultAllocator { type Buffer = VecStorage; #[inline] @@ -107,7 +107,7 @@ impl Allocator for DefaultAllocator { // Static - Dynamic #[cfg(any(feature = "std", feature = "alloc"))] -impl Allocator for DefaultAllocator { +impl Allocator for DefaultAllocator { type Buffer = VecStorage; #[inline] @@ -142,7 +142,7 @@ impl Allocator for DefaultAllocator { * */ // Anything -> Static × Static -impl Reallocator for DefaultAllocator +impl Reallocator for DefaultAllocator where RFrom: Dim, 
CFrom: Dim, @@ -173,7 +173,7 @@ where // Static × Static -> Dynamic × Any #[cfg(any(feature = "std", feature = "alloc"))] -impl Reallocator for DefaultAllocator +impl Reallocator for DefaultAllocator where RFrom: DimName, CFrom: DimName, @@ -202,7 +202,7 @@ where // Static × Static -> Static × Dynamic #[cfg(any(feature = "std", feature = "alloc"))] -impl Reallocator for DefaultAllocator +impl Reallocator for DefaultAllocator where RFrom: DimName, CFrom: DimName, @@ -231,7 +231,7 @@ where // All conversion from a dynamic buffer to a dynamic buffer. #[cfg(any(feature = "std", feature = "alloc"))] -impl Reallocator +impl Reallocator for DefaultAllocator { #[inline] @@ -247,7 +247,7 @@ impl Reallocator Reallocator +impl Reallocator for DefaultAllocator { #[inline] @@ -263,7 +263,7 @@ impl Reallocator Reallocator +impl Reallocator for DefaultAllocator { #[inline] @@ -279,7 +279,7 @@ impl Reallocator Reallocator +impl Reallocator for DefaultAllocator { #[inline] diff --git a/src/base/edition.rs b/src/base/edition.rs index e473246f..cc4d4295 100644 --- a/src/base/edition.rs +++ b/src/base/edition.rs @@ -18,7 +18,7 @@ use crate::base::storage::{Storage, StorageMut}; use crate::base::DMatrix; use crate::base::{DefaultAllocator, Matrix, MatrixMN, RowVector, Scalar, Vector}; -impl> Matrix { +impl> Matrix { /// Extracts the upper triangular part of this matrix (including the diagonal). #[inline] pub fn upper_triangle(&self) -> MatrixMN @@ -92,7 +92,7 @@ impl> Matrix { } } -impl> Matrix { +impl> Matrix { /// Sets all the elements of this matrix to `val`. #[inline] pub fn fill(&mut self, val: N) { @@ -253,7 +253,7 @@ impl> Matrix { } } -impl> Matrix { +impl> Matrix { /// Copies the upper-triangle of this matrix to its lower-triangular part. /// /// This makes the matrix symmetric. Panics if the matrix is not square. @@ -291,7 +291,7 @@ impl> Matrix { * FIXME: specialize all the following for slices. * */ -impl> Matrix { +impl> Matrix { /* * * Column removal. 
@@ -797,7 +797,7 @@ impl> Matrix { } #[cfg(any(feature = "std", feature = "alloc"))] -impl DMatrix { +impl DMatrix { /// Resizes this matrix in-place. /// /// The values are copied such that `self[(i, j)] == result[(i, j)]`. If the result has more @@ -814,7 +814,7 @@ impl DMatrix { } #[cfg(any(feature = "std", feature = "alloc"))] -impl MatrixMN +impl MatrixMN where DefaultAllocator: Allocator { /// Changes the number of rows of this matrix in-place. @@ -835,7 +835,7 @@ where DefaultAllocator: Allocator } #[cfg(any(feature = "std", feature = "alloc"))] -impl MatrixMN +impl MatrixMN where DefaultAllocator: Allocator { /// Changes the number of column of this matrix in-place. @@ -855,7 +855,7 @@ where DefaultAllocator: Allocator } } -unsafe fn compress_rows( +unsafe fn compress_rows( data: &mut [N], nrows: usize, ncols: usize, @@ -895,7 +895,7 @@ unsafe fn compress_rows( // Moves entries of a matrix buffer to make place for `ninsert` emty rows starting at the `i-th` row index. // The `data` buffer is assumed to contained at least `(nrows + ninsert) * ncols` elements. -unsafe fn extend_rows( +unsafe fn extend_rows( data: &mut [N], nrows: usize, ncols: usize, @@ -938,7 +938,7 @@ unsafe fn extend_rows( #[cfg(any(feature = "std", feature = "alloc"))] impl Extend for Matrix where - N: Scalar, + N: Scalar + Copy, R: Dim, S: Extend, { @@ -986,7 +986,7 @@ where #[cfg(any(feature = "std", feature = "alloc"))] impl Extend for Matrix where - N: Scalar, + N: Scalar + Copy, S: Extend, { /// Extend the number of rows of a `Vector` with elements @@ -1007,7 +1007,7 @@ where #[cfg(any(feature = "std", feature = "alloc"))] impl Extend> for Matrix where - N: Scalar, + N: Scalar + Copy, R: Dim, S: Extend>, RV: Dim, diff --git a/src/base/indexing.rs b/src/base/indexing.rs index ca786530..8ce52454 100644 --- a/src/base/indexing.rs +++ b/src/base/indexing.rs @@ -267,7 +267,7 @@ fn dimrange_rangetoinclusive_usize() { } /// A helper trait used for indexing operations. 
-pub trait MatrixIndex<'a, N: Scalar, R: Dim, C: Dim, S: Storage>: Sized { +pub trait MatrixIndex<'a, N: Scalar + Copy, R: Dim, C: Dim, S: Storage>: Sized { /// The output type returned by methods. type Output : 'a; @@ -303,7 +303,7 @@ pub trait MatrixIndex<'a, N: Scalar, R: Dim, C: Dim, S: Storage>: Sized } /// A helper trait used for indexing operations. -pub trait MatrixIndexMut<'a, N: Scalar, R: Dim, C: Dim, S: StorageMut>: MatrixIndex<'a, N, R, C, S> { +pub trait MatrixIndexMut<'a, N: Scalar + Copy, R: Dim, C: Dim, S: StorageMut>: MatrixIndex<'a, N, R, C, S> { /// The output type returned by methods. type OutputMut : 'a; @@ -432,7 +432,7 @@ pub trait MatrixIndexMut<'a, N: Scalar, R: Dim, C: Dim, S: StorageMut>: /// 4, 7, /// 5, 8))); /// ``` -impl> Matrix +impl> Matrix { /// Produces a view of the data at the given index, or /// `None` if the index is out of bounds. @@ -502,7 +502,7 @@ impl> Matrix impl<'a, N, R, C, S> MatrixIndex<'a, N, R, C, S> for usize where - N: Scalar, + N: Scalar + Copy, R: Dim, C: Dim, S: Storage @@ -524,7 +524,7 @@ where impl<'a, N, R, C, S> MatrixIndexMut<'a, N, R, C, S> for usize where - N: Scalar, + N: Scalar + Copy, R: Dim, C: Dim, S: StorageMut @@ -544,7 +544,7 @@ where impl<'a, N, R, C, S> MatrixIndex<'a, N, R, C, S> for (usize, usize) where - N: Scalar, + N: Scalar + Copy, R: Dim, C: Dim, S: Storage @@ -569,7 +569,7 @@ where impl<'a, N, R, C, S> MatrixIndexMut<'a, N, R, C, S> for (usize, usize) where - N: Scalar, + N: Scalar + Copy, R: Dim, C: Dim, S: StorageMut @@ -607,7 +607,7 @@ macro_rules! impl_index_pair { { impl<'a, N, $R, $C, S, $($RTyP : $RTyPB,)* $($CTyP : $CTyPB),*> MatrixIndex<'a, N, $R, $C, S> for ($RIdx, $CIdx) where - N: Scalar, + N: Scalar + Copy, $R: Dim, $C: Dim, S: Storage, @@ -643,7 +643,7 @@ macro_rules! 
impl_index_pair { impl<'a, N, $R, $C, S, $($RTyP : $RTyPB,)* $($CTyP : $CTyPB),*> MatrixIndexMut<'a, N, $R, $C, S> for ($RIdx, $CIdx) where - N: Scalar, + N: Scalar + Copy, $R: Dim, $C: Dim, S: StorageMut, diff --git a/src/base/iter.rs b/src/base/iter.rs index 74e4f018..966f97e0 100644 --- a/src/base/iter.rs +++ b/src/base/iter.rs @@ -10,7 +10,7 @@ use crate::base::{Scalar, Matrix, MatrixSlice, MatrixSliceMut}; macro_rules! iterator { (struct $Name:ident for $Storage:ident.$ptr: ident -> $Ptr:ty, $Ref:ty, $SRef: ty) => { /// An iterator through a dense matrix with arbitrary strides matrix. - pub struct $Name<'a, N: Scalar, R: Dim, C: Dim, S: 'a + $Storage> { + pub struct $Name<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + $Storage> { ptr: $Ptr, inner_ptr: $Ptr, inner_end: $Ptr, @@ -21,7 +21,7 @@ macro_rules! iterator { // FIXME: we need to specialize for the case where the matrix storage is owned (in which // case the iterator is trivial because it does not have any stride). - impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + $Storage> $Name<'a, N, R, C, S> { + impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + $Storage> $Name<'a, N, R, C, S> { /// Creates a new iterator for the given matrix storage. pub fn new(storage: $SRef) -> $Name<'a, N, R, C, S> { let shape = storage.shape(); @@ -40,7 +40,7 @@ macro_rules! iterator { } } - impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + $Storage> Iterator + impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + $Storage> Iterator for $Name<'a, N, R, C, S> { type Item = $Ref; @@ -83,7 +83,7 @@ macro_rules! iterator { } } - impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + $Storage> ExactSizeIterator + impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + $Storage> ExactSizeIterator for $Name<'a, N, R, C, S> { #[inline] @@ -105,12 +105,12 @@ iterator!(struct MatrixIterMut for StorageMut.ptr_mut -> *mut N, &'a mut N, &'a */ #[derive(Clone)] /// An iterator through the rows of a matrix. 
-pub struct RowIter<'a, N: Scalar, R: Dim, C: Dim, S: Storage> { +pub struct RowIter<'a, N: Scalar + Copy, R: Dim, C: Dim, S: Storage> { mat: &'a Matrix, curr: usize } -impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + Storage> RowIter<'a, N, R, C, S> { +impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + Storage> RowIter<'a, N, R, C, S> { pub(crate) fn new(mat: &'a Matrix) -> Self { RowIter { mat, curr: 0 @@ -119,7 +119,7 @@ impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + Storage> RowIter<'a, N, R, } -impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + Storage> Iterator for RowIter<'a, N, R, C, S> { +impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + Storage> Iterator for RowIter<'a, N, R, C, S> { type Item = MatrixSlice<'a, N, U1, C, S::RStride, S::CStride>; #[inline] @@ -144,7 +144,7 @@ impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + Storage> Iterator for RowIt } } -impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + Storage> ExactSizeIterator for RowIter<'a, N, R, C, S> { +impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + Storage> ExactSizeIterator for RowIter<'a, N, R, C, S> { #[inline] fn len(&self) -> usize { self.mat.nrows() - self.curr @@ -153,13 +153,13 @@ impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + Storage> ExactSizeIterator /// An iterator through the mutable rows of a matrix. 
-pub struct RowIterMut<'a, N: Scalar, R: Dim, C: Dim, S: StorageMut> { +pub struct RowIterMut<'a, N: Scalar + Copy, R: Dim, C: Dim, S: StorageMut> { mat: *mut Matrix, curr: usize, phantom: PhantomData<&'a mut Matrix> } -impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + StorageMut> RowIterMut<'a, N, R, C, S> { +impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + StorageMut> RowIterMut<'a, N, R, C, S> { pub(crate) fn new(mat: &'a mut Matrix) -> Self { RowIterMut { mat, @@ -176,7 +176,7 @@ impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + StorageMut> RowIterMut<'a, } -impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + StorageMut> Iterator for RowIterMut<'a, N, R, C, S> { +impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + StorageMut> Iterator for RowIterMut<'a, N, R, C, S> { type Item = MatrixSliceMut<'a, N, U1, C, S::RStride, S::CStride>; #[inline] @@ -201,7 +201,7 @@ impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + StorageMut> Iterator for Ro } } -impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + StorageMut> ExactSizeIterator for RowIterMut<'a, N, R, C, S> { +impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + StorageMut> ExactSizeIterator for RowIterMut<'a, N, R, C, S> { #[inline] fn len(&self) -> usize { self.nrows() - self.curr @@ -216,12 +216,12 @@ impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + StorageMut> ExactSizeIterat */ #[derive(Clone)] /// An iterator through the columns of a matrix. 
-pub struct ColumnIter<'a, N: Scalar, R: Dim, C: Dim, S: Storage> { +pub struct ColumnIter<'a, N: Scalar + Copy, R: Dim, C: Dim, S: Storage> { mat: &'a Matrix, curr: usize } -impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + Storage> ColumnIter<'a, N, R, C, S> { +impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + Storage> ColumnIter<'a, N, R, C, S> { pub(crate) fn new(mat: &'a Matrix) -> Self { ColumnIter { mat, curr: 0 @@ -230,7 +230,7 @@ impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + Storage> ColumnIter<'a, N, } -impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + Storage> Iterator for ColumnIter<'a, N, R, C, S> { +impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + Storage> Iterator for ColumnIter<'a, N, R, C, S> { type Item = MatrixSlice<'a, N, R, U1, S::RStride, S::CStride>; #[inline] @@ -255,7 +255,7 @@ impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + Storage> Iterator for Colum } } -impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + Storage> ExactSizeIterator for ColumnIter<'a, N, R, C, S> { +impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + Storage> ExactSizeIterator for ColumnIter<'a, N, R, C, S> { #[inline] fn len(&self) -> usize { self.mat.ncols() - self.curr @@ -264,13 +264,13 @@ impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + Storage> ExactSizeIterator /// An iterator through the mutable columns of a matrix. 
-pub struct ColumnIterMut<'a, N: Scalar, R: Dim, C: Dim, S: StorageMut> { +pub struct ColumnIterMut<'a, N: Scalar + Copy, R: Dim, C: Dim, S: StorageMut> { mat: *mut Matrix, curr: usize, phantom: PhantomData<&'a mut Matrix> } -impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + StorageMut> ColumnIterMut<'a, N, R, C, S> { +impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + StorageMut> ColumnIterMut<'a, N, R, C, S> { pub(crate) fn new(mat: &'a mut Matrix) -> Self { ColumnIterMut { mat, @@ -287,7 +287,7 @@ impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + StorageMut> ColumnIterMut<' } -impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + StorageMut> Iterator for ColumnIterMut<'a, N, R, C, S> { +impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + StorageMut> Iterator for ColumnIterMut<'a, N, R, C, S> { type Item = MatrixSliceMut<'a, N, R, U1, S::RStride, S::CStride>; #[inline] @@ -312,7 +312,7 @@ impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + StorageMut> Iterator for Co } } -impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + StorageMut> ExactSizeIterator for ColumnIterMut<'a, N, R, C, S> { +impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + StorageMut> ExactSizeIterator for ColumnIterMut<'a, N, R, C, S> { #[inline] fn len(&self) -> usize { self.ncols() - self.curr diff --git a/src/base/matrix.rs b/src/base/matrix.rs index 54241436..08c22cbb 100644 --- a/src/base/matrix.rs +++ b/src/base/matrix.rs @@ -73,7 +73,7 @@ pub type MatrixCross = /// some concrete types for `N` and a compatible data storage type `S`). #[repr(C)] #[derive(Clone, Copy)] -pub struct Matrix { +pub struct Matrix { /// The data storage that contains all the matrix components and informations about its number /// of rows and column (if needed). 
pub data: S, @@ -81,7 +81,7 @@ pub struct Matrix { _phantoms: PhantomData<(N, R, C)>, } -impl fmt::Debug for Matrix { +impl fmt::Debug for Matrix { fn fmt(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::Error> { formatter .debug_struct("Matrix") @@ -93,7 +93,7 @@ impl fmt::Debug for Matrix #[cfg(feature = "serde-serialize")] impl Serialize for Matrix where - N: Scalar, + N: Scalar + Copy, R: Dim, C: Dim, S: Serialize, @@ -107,7 +107,7 @@ where #[cfg(feature = "serde-serialize")] impl<'de, N, R, C, S> Deserialize<'de> for Matrix where - N: Scalar, + N: Scalar + Copy, R: Dim, C: Dim, S: Deserialize<'de>, @@ -122,7 +122,7 @@ where } #[cfg(feature = "abomonation-serialize")] -impl Abomonation for Matrix { +impl Abomonation for Matrix { unsafe fn entomb(&self, writer: &mut W) -> IOResult<()> { self.data.entomb(writer) } @@ -136,7 +136,7 @@ impl Abomonation for Matrix Matrix { +impl Matrix { /// Creates a new matrix with the given data without statically checking that the matrix /// dimension matches the storage dimension. #[inline] @@ -148,7 +148,7 @@ impl Matrix { } } -impl> Matrix { +impl> Matrix { /// Creates a new matrix with the given data. #[inline] pub fn from_data(data: S) -> Self { @@ -413,7 +413,7 @@ impl> Matrix { /// Returns a matrix containing the result of `f` applied to each of its entries. #[inline] - pub fn map N2>(&self, mut f: F) -> MatrixMN + pub fn map N2>(&self, mut f: F) -> MatrixMN where DefaultAllocator: Allocator { let (nrows, ncols) = self.data.shape(); @@ -434,7 +434,7 @@ impl> Matrix { /// Returns a matrix containing the result of `f` applied to each of its entries. Unlike `map`, /// `f` also gets passed the row and column index, i.e. `f(row, col, value)`. 
#[inline] - pub fn map_with_location N2>( + pub fn map_with_location N2>( &self, mut f: F, ) -> MatrixMN @@ -462,8 +462,8 @@ impl> Matrix { #[inline] pub fn zip_map(&self, rhs: &Matrix, mut f: F) -> MatrixMN where - N2: Scalar, - N3: Scalar, + N2: Scalar + Copy, + N3: Scalar + Copy, S2: Storage, F: FnMut(N, N2) -> N3, DefaultAllocator: Allocator, @@ -500,9 +500,9 @@ impl> Matrix { mut f: F, ) -> MatrixMN where - N2: Scalar, - N3: Scalar, - N4: Scalar, + N2: Scalar + Copy, + N3: Scalar + Copy, + N4: Scalar + Copy, S2: Storage, S3: Storage, F: FnMut(N, N2, N3) -> N4, @@ -555,7 +555,7 @@ impl> Matrix { #[inline] pub fn zip_fold(&self, rhs: &Matrix, init: Acc, mut f: impl FnMut(Acc, N, N2) -> Acc) -> Acc where - N2: Scalar, + N2: Scalar + Copy, R2: Dim, C2: Dim, S2: Storage, @@ -623,7 +623,7 @@ impl> Matrix { } } -impl> Matrix { +impl> Matrix { /// Mutably iterates through this matrix coordinates. #[inline] pub fn iter_mut(&mut self) -> MatrixIterMut { @@ -797,7 +797,7 @@ impl> Matrix { /// joined with the components from `rhs`. #[inline] pub fn zip_apply(&mut self, rhs: &Matrix, mut f: impl FnMut(N, N2) -> N) - where N2: Scalar, + where N2: Scalar + Copy, R2: Dim, C2: Dim, S2: Storage, @@ -825,11 +825,11 @@ impl> Matrix { /// joined with the components from `b` and `c`. #[inline] pub fn zip_zip_apply(&mut self, b: &Matrix, c: &Matrix, mut f: impl FnMut(N, N2, N3) -> N) - where N2: Scalar, + where N2: Scalar + Copy, R2: Dim, C2: Dim, S2: Storage, - N3: Scalar, + N3: Scalar + Copy, R3: Dim, C3: Dim, S3: Storage, @@ -859,7 +859,7 @@ impl> Matrix { } } -impl> Vector { +impl> Vector { /// Gets a reference to the i-th element of this column vector without bound checking. #[inline] pub unsafe fn vget_unchecked(&self, i: usize) -> &N { @@ -869,7 +869,7 @@ impl> Vector { } } -impl> Vector { +impl> Vector { /// Gets a mutable reference to the i-th element of this column vector without bound checking. 
#[inline] pub unsafe fn vget_unchecked_mut(&mut self, i: usize) -> &mut N { @@ -879,7 +879,7 @@ impl> Vector { } } -impl> Matrix { +impl> Matrix { /// Extracts a slice containing the entire matrix entries ordered column-by-columns. #[inline] pub fn as_slice(&self) -> &[N] { @@ -887,7 +887,7 @@ impl> Matrix> Matrix { +impl> Matrix { /// Extracts a mutable slice containing the entire matrix entries ordered column-by-columns. #[inline] pub fn as_mut_slice(&mut self) -> &mut [N] { @@ -895,7 +895,7 @@ impl> Matrix> Matrix { +impl> Matrix { /// Transposes the square matrix `self` in-place. pub fn transpose_mut(&mut self) { assert!( @@ -1052,7 +1052,7 @@ impl> Matrix { } } -impl> SquareMatrix { +impl> SquareMatrix { /// The diagonal of this matrix. #[inline] pub fn diagonal(&self) -> VectorN @@ -1064,7 +1064,7 @@ impl> SquareMatrix { /// /// This is a more efficient version of `self.diagonal().map(f)` since this /// allocates only once. - pub fn map_diagonal(&self, mut f: impl FnMut(N) -> N2) -> VectorN + pub fn map_diagonal(&self, mut f: impl FnMut(N) -> N2) -> VectorN where DefaultAllocator: Allocator { assert!( self.is_square(), @@ -1128,7 +1128,7 @@ impl> SquareMatrix { } } -impl + IsNotStaticOne, S: Storage> Matrix { +impl + IsNotStaticOne, S: Storage> Matrix { /// Yields the homogeneous matrix for this matrix, i.e., appending an additional dimension and /// and setting the diagonal element to `1`. @@ -1144,7 +1144,7 @@ impl + IsNotStaticOne, S: Storage } -impl, S: Storage> Vector { +impl, S: Storage> Vector { /// Computes the coordinates in projective space of this vector, i.e., appends a `0` to its /// coordinates. #[inline] @@ -1170,7 +1170,7 @@ impl, S: Storage> Vector { } } -impl, S: Storage> Vector { +impl, S: Storage> Vector { /// Constructs a new vector of higher dimension by appending `element` to the end of `self`. 
#[inline] pub fn push(&self, element: N) -> VectorN> @@ -1188,7 +1188,7 @@ impl, S: Storage> Vector { impl AbsDiffEq for Matrix where - N: Scalar + AbsDiffEq, + N: Scalar + Copy + AbsDiffEq, S: Storage, N::Epsilon: Copy, { @@ -1209,7 +1209,7 @@ where impl RelativeEq for Matrix where - N: Scalar + RelativeEq, + N: Scalar + Copy + RelativeEq, S: Storage, N::Epsilon: Copy, { @@ -1232,7 +1232,7 @@ where impl UlpsEq for Matrix where - N: Scalar + UlpsEq, + N: Scalar + Copy + UlpsEq, S: Storage, N::Epsilon: Copy, { @@ -1252,7 +1252,7 @@ where impl PartialOrd for Matrix where - N: Scalar + PartialOrd, + N: Scalar + Copy + PartialOrd, S: Storage, { #[inline] @@ -1340,13 +1340,13 @@ where impl Eq for Matrix where - N: Scalar + Eq, + N: Scalar + Copy + Eq, S: Storage, {} impl PartialEq for Matrix where - N: Scalar, + N: Scalar + Copy, S: Storage, { #[inline] @@ -1363,13 +1363,13 @@ macro_rules! impl_fmt { ($trait: path, $fmt_str_without_precision: expr, $fmt_str_with_precision: expr) => { impl $trait for Matrix where - N: Scalar + $trait, + N: Scalar + Copy + $trait, S: Storage, DefaultAllocator: Allocator, { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { #[cfg(feature = "std")] - fn val_width(val: N, f: &mut fmt::Formatter) -> usize { + fn val_width(val: N, f: &mut fmt::Formatter) -> usize { match f.precision() { Some(precision) => format!($fmt_str_with_precision, val, precision).chars().count(), None => format!($fmt_str_without_precision, val).chars().count(), @@ -1377,7 +1377,7 @@ macro_rules! impl_fmt { } #[cfg(not(feature = "std"))] - fn val_width(_: N, _: &mut fmt::Formatter) -> usize { + fn val_width(_: N, _: &mut fmt::Formatter) -> usize { 4 } @@ -1454,7 +1454,7 @@ fn lower_exp() { ") } -impl> Matrix { +impl> Matrix { /// The perpendicular product between two 2D column vectors, i.e. `a.x * b.y - a.y * b.x`. 
#[inline] pub fn perp(&self, b: &Matrix) -> N @@ -1545,7 +1545,7 @@ impl> Matrix { } } -impl> Vector +impl> Vector where DefaultAllocator: Allocator { /// Computes the matrix `M` such that for all vector `v` we have `M * v == self.cross(&v)`. @@ -1593,7 +1593,7 @@ impl> Matrix { } } -impl> +impl> Vector { /// Returns `self * (1.0 - t) + rhs * t`, i.e., the linear blend of the vectors x and y using the scalar value a. @@ -1683,7 +1683,7 @@ impl> Unit> { impl AbsDiffEq for Unit> where - N: Scalar + AbsDiffEq, + N: Scalar + Copy + AbsDiffEq, S: Storage, N::Epsilon: Copy, { @@ -1702,7 +1702,7 @@ where impl RelativeEq for Unit> where - N: Scalar + RelativeEq, + N: Scalar + Copy + RelativeEq, S: Storage, N::Epsilon: Copy, { @@ -1726,7 +1726,7 @@ where impl UlpsEq for Unit> where - N: Scalar + UlpsEq, + N: Scalar + Copy + UlpsEq, S: Storage, N::Epsilon: Copy, { @@ -1743,7 +1743,7 @@ where impl Hash for Matrix where - N: Scalar + Hash, + N: Scalar + Copy + Hash, R: Dim, C: Dim, S: Storage, diff --git a/src/base/matrix_alga.rs b/src/base/matrix_alga.rs index ac6aced7..330c5f94 100644 --- a/src/base/matrix_alga.rs +++ b/src/base/matrix_alga.rs @@ -25,7 +25,7 @@ use crate::base::{DefaultAllocator, MatrixMN, MatrixN, Scalar}; */ impl Identity for MatrixMN where - N: Scalar + Zero, + N: Scalar + Copy + Zero, DefaultAllocator: Allocator, { #[inline] @@ -36,7 +36,7 @@ where impl AbstractMagma for MatrixMN where - N: Scalar + ClosedAdd, + N: Scalar + Copy + ClosedAdd, DefaultAllocator: Allocator, { #[inline] @@ -47,7 +47,7 @@ where impl TwoSidedInverse for MatrixMN where - N: Scalar + ClosedNeg, + N: Scalar + Copy + ClosedNeg, DefaultAllocator: Allocator, { #[inline] @@ -64,7 +64,7 @@ where macro_rules! 
inherit_additive_structure( ($($marker: ident<$operator: ident> $(+ $bounds: ident)*),* $(,)*) => {$( impl $marker<$operator> for MatrixMN - where N: Scalar + $marker<$operator> $(+ $bounds)*, + where N: Scalar + Copy + $marker<$operator> $(+ $bounds)*, DefaultAllocator: Allocator { } )*} ); @@ -80,7 +80,7 @@ inherit_additive_structure!( impl AbstractModule for MatrixMN where - N: Scalar + RingCommutative, + N: Scalar + Copy + RingCommutative, DefaultAllocator: Allocator, { type AbstractRing = N; @@ -93,7 +93,7 @@ where impl Module for MatrixMN where - N: Scalar + RingCommutative, + N: Scalar + Copy + RingCommutative, DefaultAllocator: Allocator, { type Ring = N; @@ -101,7 +101,7 @@ where impl VectorSpace for MatrixMN where - N: Scalar + Field, + N: Scalar + Copy + Field, DefaultAllocator: Allocator, { type Field = N; @@ -109,7 +109,7 @@ where impl FiniteDimVectorSpace for MatrixMN where - N: Scalar + Field, + N: Scalar + Copy + Field, DefaultAllocator: Allocator, { #[inline] @@ -329,7 +329,7 @@ where DefaultAllocator: Allocator */ impl Identity for MatrixN where - N: Scalar + Zero + One, + N: Scalar + Copy + Zero + One, DefaultAllocator: Allocator, { #[inline] @@ -340,7 +340,7 @@ where impl AbstractMagma for MatrixN where - N: Scalar + Zero + One + ClosedAdd + ClosedMul, + N: Scalar + Copy + Zero + One + ClosedAdd + ClosedMul, DefaultAllocator: Allocator, { #[inline] @@ -352,7 +352,7 @@ where macro_rules! 
impl_multiplicative_structure( ($($marker: ident<$operator: ident> $(+ $bounds: ident)*),* $(,)*) => {$( impl $marker<$operator> for MatrixN - where N: Scalar + Zero + One + ClosedAdd + ClosedMul + $marker<$operator> $(+ $bounds)*, + where N: Scalar + Copy + Zero + One + ClosedAdd + ClosedMul + $marker<$operator> $(+ $bounds)*, DefaultAllocator: Allocator { } )*} ); @@ -369,7 +369,7 @@ impl_multiplicative_structure!( */ impl MeetSemilattice for MatrixMN where - N: Scalar + MeetSemilattice, + N: Scalar + Copy + MeetSemilattice, DefaultAllocator: Allocator, { #[inline] @@ -380,7 +380,7 @@ where impl JoinSemilattice for MatrixMN where - N: Scalar + JoinSemilattice, + N: Scalar + Copy + JoinSemilattice, DefaultAllocator: Allocator, { #[inline] @@ -391,7 +391,7 @@ where impl Lattice for MatrixMN where - N: Scalar + Lattice, + N: Scalar + Copy + Lattice, DefaultAllocator: Allocator, { #[inline] diff --git a/src/base/matrix_slice.rs b/src/base/matrix_slice.rs index be53034a..43131680 100644 --- a/src/base/matrix_slice.rs +++ b/src/base/matrix_slice.rs @@ -13,22 +13,22 @@ macro_rules! 
slice_storage_impl( ($doc: expr; $Storage: ident as $SRef: ty; $T: ident.$get_addr: ident ($Ptr: ty as $Ref: ty)) => { #[doc = $doc] #[derive(Debug)] - pub struct $T<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> { + pub struct $T<'a, N: Scalar + Copy, R: Dim, C: Dim, RStride: Dim, CStride: Dim> { ptr: $Ptr, shape: (R, C), strides: (RStride, CStride), _phantoms: PhantomData<$Ref>, } - unsafe impl<'a, N: Scalar + Send, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Send + unsafe impl<'a, N: Scalar + Copy + Send, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Send for $T<'a, N, R, C, RStride, CStride> {} - unsafe impl<'a, N: Scalar + Sync, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Sync + unsafe impl<'a, N: Scalar + Copy + Sync, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Sync for $T<'a, N, R, C, RStride, CStride> {} - impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> $T<'a, N, R, C, RStride, CStride> { + impl<'a, N: Scalar + Copy, R: Dim, C: Dim, RStride: Dim, CStride: Dim> $T<'a, N, R, C, RStride, CStride> { /// Create a new matrix slice without bound checking and from a raw pointer. #[inline] pub unsafe fn from_raw_parts(ptr: $Ptr, @@ -48,7 +48,7 @@ macro_rules! slice_storage_impl( } // Dynamic is arbitrary. It's just to be able to call the constructors with `Slice::` - impl<'a, N: Scalar, R: Dim, C: Dim> $T<'a, N, R, C, Dynamic, Dynamic> { + impl<'a, N: Scalar + Copy, R: Dim, C: Dim> $T<'a, N, R, C, Dynamic, Dynamic> { /// Create a new matrix slice without bound checking. #[inline] pub unsafe fn new_unchecked(storage: $SRef, start: (usize, usize), shape: (R, C)) @@ -89,12 +89,12 @@ slice_storage_impl!("A mutable matrix data storage for mutable matrix slice. 
Onl StorageMut as &'a mut S; SliceStorageMut.get_address_unchecked_mut(*mut N as &'a mut N) ); -impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Copy +impl<'a, N: Scalar + Copy, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Copy for SliceStorage<'a, N, R, C, RStride, CStride> { } -impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Clone +impl<'a, N: Scalar + Copy, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Clone for SliceStorage<'a, N, R, C, RStride, CStride> { #[inline] @@ -110,7 +110,7 @@ impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Clone macro_rules! storage_impl( ($($T: ident),* $(,)*) => {$( - unsafe impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Storage + unsafe impl<'a, N: Scalar + Copy, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Storage for $T<'a, N, R, C, RStride, CStride> { type RStride = RStride; @@ -178,7 +178,7 @@ macro_rules! storage_impl( storage_impl!(SliceStorage, SliceStorageMut); -unsafe impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> StorageMut +unsafe impl<'a, N: Scalar + Copy, R: Dim, C: Dim, RStride: Dim, CStride: Dim> StorageMut for SliceStorageMut<'a, N, R, C, RStride, CStride> { #[inline] @@ -198,15 +198,15 @@ unsafe impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> StorageMu } } -unsafe impl<'a, N: Scalar, R: Dim, CStride: Dim> ContiguousStorage for SliceStorage<'a, N, R, U1, U1, CStride> { } -unsafe impl<'a, N: Scalar, R: Dim, CStride: Dim> ContiguousStorage for SliceStorageMut<'a, N, R, U1, U1, CStride> { } -unsafe impl<'a, N: Scalar, R: Dim, CStride: Dim> ContiguousStorageMut for SliceStorageMut<'a, N, R, U1, U1, CStride> { } +unsafe impl<'a, N: Scalar + Copy, R: Dim, CStride: Dim> ContiguousStorage for SliceStorage<'a, N, R, U1, U1, CStride> { } +unsafe impl<'a, N: Scalar + Copy, R: Dim, CStride: Dim> ContiguousStorage for SliceStorageMut<'a, N, R, U1, U1, CStride> { } +unsafe impl<'a, N: Scalar + Copy, R: Dim, CStride: Dim> 
ContiguousStorageMut for SliceStorageMut<'a, N, R, U1, U1, CStride> { } -unsafe impl<'a, N: Scalar, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorage for SliceStorage<'a, N, R, C, U1, R> { } -unsafe impl<'a, N: Scalar, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorage for SliceStorageMut<'a, N, R, C, U1, R> { } -unsafe impl<'a, N: Scalar, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorageMut for SliceStorageMut<'a, N, R, C, U1, R> { } +unsafe impl<'a, N: Scalar + Copy, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorage for SliceStorage<'a, N, R, C, U1, R> { } +unsafe impl<'a, N: Scalar + Copy, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorage for SliceStorageMut<'a, N, R, C, U1, R> { } +unsafe impl<'a, N: Scalar + Copy, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorageMut for SliceStorageMut<'a, N, R, C, U1, R> { } -impl> Matrix { +impl> Matrix { #[inline] fn assert_slice_index( &self, @@ -261,7 +261,7 @@ macro_rules! matrix_slice_impl( pub type $MatrixSlice<'a, N, R, C, RStride, CStride> = Matrix>; - impl> Matrix { + impl> Matrix { /* * * Row slicing. @@ -786,7 +786,7 @@ impl SliceRange for RangeFull { } } -impl> Matrix { +impl> Matrix { /// Slices a sub-matrix containing the rows indexed by the range `rows` and the columns indexed /// by the range `cols`. #[inline] @@ -827,7 +827,7 @@ impl> Matrix { } } -impl> Matrix { +impl> Matrix { /// Slices a mutable sub-matrix containing the rows indexed by the range `rows` and the columns /// indexed by the range `cols`. pub fn slice_range_mut( @@ -871,7 +871,7 @@ impl> Matrix { impl<'a, N, R, C, RStride, CStride> From> for MatrixSlice<'a, N, R, C, RStride, CStride> where - N: Scalar, + N: Scalar + Copy, R: Dim, C: Dim, RStride: Dim, diff --git a/src/base/ops.rs b/src/base/ops.rs index ef0014c5..77b35e94 100644 --- a/src/base/ops.rs +++ b/src/base/ops.rs @@ -20,7 +20,7 @@ use crate::base::{DefaultAllocator, Matrix, MatrixMN, MatrixN, MatrixSum, Scalar * Indexing. 
* */ -impl> Index for Matrix { +impl> Index for Matrix { type Output = N; #[inline] @@ -32,7 +32,7 @@ impl> Index for Matrix Index<(usize, usize)> for Matrix where - N: Scalar, + N: Scalar + Copy, S: Storage, { type Output = N; @@ -50,7 +50,7 @@ where } // Mutable versions. -impl> IndexMut for Matrix { +impl> IndexMut for Matrix { #[inline] fn index_mut(&mut self, i: usize) -> &mut N { let ij = self.vector_to_matrix_index(i); @@ -60,7 +60,7 @@ impl> IndexMut for Matr impl IndexMut<(usize, usize)> for Matrix where - N: Scalar, + N: Scalar + Copy, S: StorageMut, { #[inline] @@ -82,7 +82,7 @@ where */ impl Neg for Matrix where - N: Scalar + ClosedNeg, + N: Scalar + Copy + ClosedNeg, S: Storage, DefaultAllocator: Allocator, { @@ -98,7 +98,7 @@ where impl<'a, N, R: Dim, C: Dim, S> Neg for &'a Matrix where - N: Scalar + ClosedNeg, + N: Scalar + Copy + ClosedNeg, S: Storage, DefaultAllocator: Allocator, { @@ -112,7 +112,7 @@ where impl Matrix where - N: Scalar + ClosedNeg, + N: Scalar + Copy + ClosedNeg, S: StorageMut, { /// Negates `self` in-place. @@ -137,7 +137,7 @@ macro_rules! componentwise_binop_impl( $method_to: ident, $method_to_statically_unchecked: ident) => { impl> Matrix - where N: Scalar + $bound { + where N: Scalar + Copy + $bound { /* * @@ -267,7 +267,7 @@ macro_rules! componentwise_binop_impl( impl<'b, N, R1, C1, R2, C2, SA, SB> $Trait<&'b Matrix> for Matrix where R1: Dim, C1: Dim, R2: Dim, C2: Dim, - N: Scalar + $bound, + N: Scalar + Copy + $bound, SA: Storage, SB: Storage, DefaultAllocator: SameShapeAllocator, @@ -285,7 +285,7 @@ macro_rules! componentwise_binop_impl( impl<'a, N, R1, C1, R2, C2, SA, SB> $Trait> for &'a Matrix where R1: Dim, C1: Dim, R2: Dim, C2: Dim, - N: Scalar + $bound, + N: Scalar + Copy + $bound, SA: Storage, SB: Storage, DefaultAllocator: SameShapeAllocator, @@ -303,7 +303,7 @@ macro_rules! 
componentwise_binop_impl( impl $Trait> for Matrix where R1: Dim, C1: Dim, R2: Dim, C2: Dim, - N: Scalar + $bound, + N: Scalar + Copy + $bound, SA: Storage, SB: Storage, DefaultAllocator: SameShapeAllocator, @@ -318,7 +318,7 @@ macro_rules! componentwise_binop_impl( impl<'a, 'b, N, R1, C1, R2, C2, SA, SB> $Trait<&'b Matrix> for &'a Matrix where R1: Dim, C1: Dim, R2: Dim, C2: Dim, - N: Scalar + $bound, + N: Scalar + Copy + $bound, SA: Storage, SB: Storage, DefaultAllocator: SameShapeAllocator, @@ -341,7 +341,7 @@ macro_rules! componentwise_binop_impl( impl<'b, N, R1, C1, R2, C2, SA, SB> $TraitAssign<&'b Matrix> for Matrix where R1: Dim, C1: Dim, R2: Dim, C2: Dim, - N: Scalar + $bound, + N: Scalar + Copy + $bound, SA: StorageMut, SB: Storage, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns { @@ -354,7 +354,7 @@ macro_rules! componentwise_binop_impl( impl $TraitAssign> for Matrix where R1: Dim, C1: Dim, R2: Dim, C2: Dim, - N: Scalar + $bound, + N: Scalar + Copy + $bound, SA: StorageMut, SB: Storage, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns { @@ -376,7 +376,7 @@ componentwise_binop_impl!(Sub, sub, ClosedSub; impl iter::Sum for MatrixMN where - N: Scalar + ClosedAdd + Zero, + N: Scalar + Copy + ClosedAdd + Zero, DefaultAllocator: Allocator, { fn sum>>(iter: I) -> MatrixMN { @@ -386,7 +386,7 @@ where impl iter::Sum for MatrixMN where - N: Scalar + ClosedAdd + Zero, + N: Scalar + Copy + ClosedAdd + Zero, DefaultAllocator: Allocator, { /// # Example @@ -416,7 +416,7 @@ where impl<'a, N, R: DimName, C: DimName> iter::Sum<&'a MatrixMN> for MatrixMN where - N: Scalar + ClosedAdd + Zero, + N: Scalar + Copy + ClosedAdd + Zero, DefaultAllocator: Allocator, { fn sum>>(iter: I) -> MatrixMN { @@ -426,7 +426,7 @@ where impl<'a, N, C: Dim> iter::Sum<&'a MatrixMN> for MatrixMN where - N: Scalar + ClosedAdd + Zero, + N: Scalar + Copy + ClosedAdd + Zero, DefaultAllocator: Allocator, { /// # Example @@ -466,7 +466,7 @@ macro_rules! 
componentwise_scalarop_impl( ($Trait: ident, $method: ident, $bound: ident; $TraitAssign: ident, $method_assign: ident) => { impl $Trait for Matrix - where N: Scalar + $bound, + where N: Scalar + Copy + $bound, S: Storage, DefaultAllocator: Allocator { type Output = MatrixMN; @@ -490,7 +490,7 @@ macro_rules! componentwise_scalarop_impl( } impl<'a, N, R: Dim, C: Dim, S> $Trait for &'a Matrix - where N: Scalar + $bound, + where N: Scalar + Copy + $bound, S: Storage, DefaultAllocator: Allocator { type Output = MatrixMN; @@ -502,7 +502,7 @@ macro_rules! componentwise_scalarop_impl( } impl $TraitAssign for Matrix - where N: Scalar + $bound, + where N: Scalar + Copy + $bound, S: StorageMut { #[inline] fn $method_assign(&mut self, rhs: N) { @@ -561,7 +561,7 @@ left_scalar_mul_impl!(u8, u16, u32, u64, usize, i8, i16, i32, i64, isize, f32, f impl<'a, 'b, N, R1: Dim, C1: Dim, R2: Dim, C2: Dim, SA, SB> Mul<&'b Matrix> for &'a Matrix where - N: Scalar + Zero + One + ClosedAdd + ClosedMul, + N: Scalar + Copy + Zero + One + ClosedAdd + ClosedMul, SA: Storage, SB: Storage, DefaultAllocator: Allocator, @@ -582,7 +582,7 @@ where impl<'a, N, R1: Dim, C1: Dim, R2: Dim, C2: Dim, SA, SB> Mul> for &'a Matrix where - N: Scalar + Zero + One + ClosedAdd + ClosedMul, + N: Scalar + Copy + Zero + One + ClosedAdd + ClosedMul, SB: Storage, SA: Storage, DefaultAllocator: Allocator, @@ -599,7 +599,7 @@ where impl<'b, N, R1: Dim, C1: Dim, R2: Dim, C2: Dim, SA, SB> Mul<&'b Matrix> for Matrix where - N: Scalar + Zero + One + ClosedAdd + ClosedMul, + N: Scalar + Copy + Zero + One + ClosedAdd + ClosedMul, SB: Storage, SA: Storage, DefaultAllocator: Allocator, @@ -616,7 +616,7 @@ where impl Mul> for Matrix where - N: Scalar + Zero + One + ClosedAdd + ClosedMul, + N: Scalar + Copy + Zero + One + ClosedAdd + ClosedMul, SB: Storage, SA: Storage, DefaultAllocator: Allocator, @@ -638,7 +638,7 @@ where R1: Dim, C1: Dim, R2: Dim, - N: Scalar + Zero + One + ClosedAdd + ClosedMul, + N: Scalar + Copy + Zero + 
One + ClosedAdd + ClosedMul, SB: Storage, SA: ContiguousStorageMut + Clone, ShapeConstraint: AreMultipliable, @@ -655,7 +655,7 @@ where R1: Dim, C1: Dim, R2: Dim, - N: Scalar + Zero + One + ClosedAdd + ClosedMul, + N: Scalar + Copy + Zero + One + ClosedAdd + ClosedMul, SB: Storage, SA: ContiguousStorageMut + Clone, ShapeConstraint: AreMultipliable, @@ -671,7 +671,7 @@ where // Transpose-multiplication. impl Matrix where - N: Scalar + Zero + One + ClosedAdd + ClosedMul, + N: Scalar + Copy + Zero + One + ClosedAdd + ClosedMul, SA: Storage, { /// Equivalent to `self.transpose() * rhs`. @@ -826,7 +826,7 @@ where } } -impl> Matrix { +impl> Matrix { /// Adds a scalar to `self`. #[inline] pub fn add_scalar(&self, rhs: N) -> MatrixMN @@ -848,7 +848,7 @@ impl> Matrix iter::Product for MatrixN where - N: Scalar + Zero + One + ClosedMul + ClosedAdd, + N: Scalar + Copy + Zero + One + ClosedMul + ClosedAdd, DefaultAllocator: Allocator, { fn product>>(iter: I) -> MatrixN { @@ -858,7 +858,7 @@ where impl<'a, N, D: DimName> iter::Product<&'a MatrixN> for MatrixN where - N: Scalar + Zero + One + ClosedMul + ClosedAdd, + N: Scalar + Copy + Zero + One + ClosedMul + ClosedAdd, DefaultAllocator: Allocator, { fn product>>(iter: I) -> MatrixN { @@ -866,7 +866,7 @@ where } } -impl> Matrix { +impl> Matrix { #[inline(always)] fn xcmp(&self, abs: impl Fn(N) -> N2, ordering: Ordering) -> N2 where N2: Scalar + PartialOrd + Zero { diff --git a/src/base/properties.rs b/src/base/properties.rs index 8ca49568..020e38d7 100644 --- a/src/base/properties.rs +++ b/src/base/properties.rs @@ -9,7 +9,7 @@ use crate::base::dimension::{Dim, DimMin}; use crate::base::storage::Storage; use crate::base::{DefaultAllocator, Matrix, Scalar, SquareMatrix}; -impl> Matrix { +impl> Matrix { /// Indicates if this is an empty matrix. 
#[inline] pub fn is_empty(&self) -> bool { diff --git a/src/base/scalar.rs b/src/base/scalar.rs index 47e3019c..a6c837ff 100644 --- a/src/base/scalar.rs +++ b/src/base/scalar.rs @@ -5,7 +5,7 @@ use std::fmt::Debug; /// The basic scalar type for all structures of `nalgebra`. /// /// This does not make any assumption on the algebraic properties of `Self`. -pub trait Scalar: Copy + PartialEq + Debug + Any { +pub trait Scalar: PartialEq + Debug + Any { #[inline] /// Tests if `Self` the same as the type `T` /// diff --git a/src/base/statistics.rs b/src/base/statistics.rs index 0fe18130..d71697d7 100644 --- a/src/base/statistics.rs +++ b/src/base/statistics.rs @@ -3,7 +3,7 @@ use alga::general::{Field, SupersetOf}; use crate::storage::Storage; use crate::allocator::Allocator; -impl> Matrix { +impl> Matrix { /// Returns a row vector where each element is the result of the application of `f` on the /// corresponding column of the original matrix. #[inline] @@ -54,7 +54,7 @@ impl> Matrix { } } -impl, R: Dim, C: Dim, S: Storage> Matrix { +impl, R: Dim, C: Dim, S: Storage> Matrix { /* * * Sum computation. diff --git a/src/base/storage.rs b/src/base/storage.rs index 02941e47..f4d8551e 100644 --- a/src/base/storage.rs +++ b/src/base/storage.rs @@ -36,7 +36,7 @@ pub type CStride = /// should **not** allow the user to modify the size of the underlying buffer with safe methods /// (for example the `VecStorage::data_mut` method is unsafe because the user could change the /// vector's size so that it no longer contains enough elements: this will lead to UB. -pub unsafe trait Storage: Debug + Sized { +pub unsafe trait Storage: Debug + Sized { /// The static stride of this storage's rows. type RStride: Dim; @@ -117,7 +117,7 @@ pub unsafe trait Storage: Debug + Sized { /// Note that a mutable access does not mean that the matrix owns its data. 
For example, a mutable /// matrix slice can provide mutable access to its elements even if it does not own its data (it /// contains only an internal reference to them). -pub unsafe trait StorageMut: Storage { +pub unsafe trait StorageMut: Storage { /// The matrix mutable data pointer. fn ptr_mut(&mut self) -> *mut N; @@ -175,7 +175,7 @@ pub unsafe trait StorageMut: Storage { /// The storage requirement means that for any value of `i` in `[0, nrows * ncols[`, the value /// `.get_unchecked_linear` returns one of the matrix component. This trait is unsafe because /// failing to comply to this may cause Undefined Behaviors. -pub unsafe trait ContiguousStorage: +pub unsafe trait ContiguousStorage: Storage { } @@ -185,7 +185,7 @@ pub unsafe trait ContiguousStorage: /// The storage requirement means that for any value of `i` in `[0, nrows * ncols[`, the value /// `.get_unchecked_linear` returns one of the matrix component. This trait is unsafe because /// failing to comply to this may cause Undefined Behaviors. -pub unsafe trait ContiguousStorageMut: +pub unsafe trait ContiguousStorageMut: ContiguousStorage + StorageMut { } diff --git a/src/base/swizzle.rs b/src/base/swizzle.rs index 4c9b0b63..4508c758 100644 --- a/src/base/swizzle.rs +++ b/src/base/swizzle.rs @@ -5,7 +5,7 @@ use typenum::{self, Cmp, Greater}; macro_rules! 
impl_swizzle { ($( where $BaseDim: ident: $( $name: ident() -> $Result: ident[$($i: expr),+] ),+ ;)* ) => { $( - impl> Vector + impl> Vector where D::Value: Cmp { $( diff --git a/src/base/vec_storage.rs b/src/base/vec_storage.rs index 2b4bf743..a0230488 100644 --- a/src/base/vec_storage.rs +++ b/src/base/vec_storage.rs @@ -102,7 +102,7 @@ impl Into> for VecStorage * Dynamic − Dynamic * */ -unsafe impl Storage for VecStorage +unsafe impl Storage for VecStorage where DefaultAllocator: Allocator { type RStride = U1; @@ -146,7 +146,7 @@ where DefaultAllocator: Allocator } } -unsafe impl Storage for VecStorage +unsafe impl Storage for VecStorage where DefaultAllocator: Allocator { type RStride = U1; @@ -195,7 +195,7 @@ where DefaultAllocator: Allocator * StorageMut, ContiguousStorage. * */ -unsafe impl StorageMut for VecStorage +unsafe impl StorageMut for VecStorage where DefaultAllocator: Allocator { #[inline] @@ -209,13 +209,13 @@ where DefaultAllocator: Allocator } } -unsafe impl ContiguousStorage for VecStorage where DefaultAllocator: Allocator +unsafe impl ContiguousStorage for VecStorage where DefaultAllocator: Allocator {} -unsafe impl ContiguousStorageMut for VecStorage where DefaultAllocator: Allocator +unsafe impl ContiguousStorageMut for VecStorage where DefaultAllocator: Allocator {} -unsafe impl StorageMut for VecStorage +unsafe impl StorageMut for VecStorage where DefaultAllocator: Allocator { #[inline] @@ -244,10 +244,10 @@ impl Abomonation for VecStorage { } } -unsafe impl ContiguousStorage for VecStorage where DefaultAllocator: Allocator +unsafe impl ContiguousStorage for VecStorage where DefaultAllocator: Allocator {} -unsafe impl ContiguousStorageMut for VecStorage where DefaultAllocator: Allocator +unsafe impl ContiguousStorageMut for VecStorage where DefaultAllocator: Allocator {} impl Extend for VecStorage @@ -270,7 +270,7 @@ impl Extend for VecStorage impl Extend> for VecStorage where - N: Scalar, + N: Scalar + Copy, R: Dim, RV: Dim, SV: Storage, 
diff --git a/src/debug/random_orthogonal.rs b/src/debug/random_orthogonal.rs index 421b041a..8a3e6486 100644 --- a/src/debug/random_orthogonal.rs +++ b/src/debug/random_orthogonal.rs @@ -12,7 +12,7 @@ use crate::linalg::givens::GivensRotation; /// A random orthogonal matrix. #[derive(Clone, Debug)] -pub struct RandomOrthogonal +pub struct RandomOrthogonal where DefaultAllocator: Allocator { m: MatrixN, diff --git a/src/debug/random_sdp.rs b/src/debug/random_sdp.rs index 47e3ca60..5875faf6 100644 --- a/src/debug/random_sdp.rs +++ b/src/debug/random_sdp.rs @@ -13,7 +13,7 @@ use crate::debug::RandomOrthogonal; /// A random, well-conditioned, symmetric definite-positive matrix. #[derive(Clone, Debug)] -pub struct RandomSDP +pub struct RandomSDP where DefaultAllocator: Allocator { m: MatrixN, diff --git a/src/geometry/op_macros.rs b/src/geometry/op_macros.rs index 873c6d7d..7ee81f07 100644 --- a/src/geometry/op_macros.rs +++ b/src/geometry/op_macros.rs @@ -18,7 +18,7 @@ macro_rules! md_impl( // Lifetime. $($lives: tt),*) => { impl<$($lives ,)* N $(, $Dims: $DimsBound $(<$($BoundParam),*>)*)*> $Op<$Rhs> for $Lhs - where N: Scalar + Zero + One + ClosedAdd + ClosedMul $($(+ $ScalarBounds)*)*, + where N: Scalar + Copy + Zero + One + ClosedAdd + ClosedMul $($(+ $ScalarBounds)*)*, DefaultAllocator: Allocator + Allocator + Allocator, @@ -96,7 +96,7 @@ macro_rules! md_assign_impl( // Actual implementation and lifetimes. $action: expr; $($lives: tt),*) => { impl<$($lives ,)* N $(, $Dims: $DimsBound $(<$($BoundParam),*>)*)*> $Op<$Rhs> for $Lhs - where N: Scalar + Zero + One + ClosedAdd + ClosedMul $($(+ $ScalarBounds)*)*, + where N: Scalar + Copy + Zero + One + ClosedAdd + ClosedMul $($(+ $ScalarBounds)*)*, DefaultAllocator: Allocator + Allocator, $( $ConstraintType: $ConstraintBound $(<$( $ConstraintBoundParams $( = $EqBound )*),*>)* ),* @@ -148,7 +148,7 @@ macro_rules! 
add_sub_impl( $lhs: ident: $Lhs: ty, $rhs: ident: $Rhs: ty, Output = $Result: ty; $action: expr; $($lives: tt),*) => { impl<$($lives ,)* N $(, $Dims: $DimsBound $(<$($BoundParam),*>)*)*> $Op<$Rhs> for $Lhs - where N: Scalar + $bound, + where N: Scalar + Copy + $bound, DefaultAllocator: Allocator + Allocator + SameShapeAllocator, @@ -172,7 +172,7 @@ macro_rules! add_sub_assign_impl( $lhs: ident: $Lhs: ty, $rhs: ident: $Rhs: ty; $action: expr; $($lives: tt),*) => { impl<$($lives ,)* N $(, $Dims: $DimsBound)*> $Op<$Rhs> for $Lhs - where N: Scalar + $bound, + where N: Scalar + Copy + $bound, DefaultAllocator: Allocator + Allocator, ShapeConstraint: SameNumberOfRows<$R1, $R2> + SameNumberOfColumns<$C1, $C2> { diff --git a/src/geometry/perspective.rs b/src/geometry/perspective.rs index 8020c0cf..923da505 100644 --- a/src/geometry/perspective.rs +++ b/src/geometry/perspective.rs @@ -18,7 +18,7 @@ use crate::base::{Matrix4, Scalar, Vector, Vector3}; use crate::geometry::{Point3, Projective3}; /// A 3D perspective projection stored as an homogeneous 4x4 matrix. -pub struct Perspective3 { +pub struct Perspective3 { matrix: Matrix4, } diff --git a/src/geometry/point.rs b/src/geometry/point.rs index 04338d2a..48031ba9 100644 --- a/src/geometry/point.rs +++ b/src/geometry/point.rs @@ -20,14 +20,14 @@ use crate::base::{DefaultAllocator, Scalar, VectorN}; /// A point in a n-dimensional euclidean space. #[repr(C)] #[derive(Debug, Clone)] -pub struct Point +pub struct Point where DefaultAllocator: Allocator { /// The coordinates of this point, i.e., the shift from the origin. 
pub coords: VectorN, } -impl hash::Hash for Point +impl hash::Hash for Point where DefaultAllocator: Allocator, >::Buffer: hash::Hash, @@ -37,7 +37,7 @@ where } } -impl Copy for Point +impl Copy for Point where DefaultAllocator: Allocator, >::Buffer: Copy, @@ -45,7 +45,7 @@ where } #[cfg(feature = "serde-serialize")] -impl Serialize for Point +impl Serialize for Point where DefaultAllocator: Allocator, >::Buffer: Serialize, @@ -57,7 +57,7 @@ where } #[cfg(feature = "serde-serialize")] -impl<'a, N: Scalar, D: DimName> Deserialize<'a> for Point +impl<'a, N: Scalar + Copy, D: DimName> Deserialize<'a> for Point where DefaultAllocator: Allocator, >::Buffer: Deserialize<'a>, @@ -73,7 +73,7 @@ where #[cfg(feature = "abomonation-serialize")] impl Abomonation for Point where - N: Scalar, + N: Scalar + Copy, D: DimName, VectorN: Abomonation, DefaultAllocator: Allocator, @@ -91,7 +91,7 @@ where } } -impl Point +impl Point where DefaultAllocator: Allocator { /// Converts this point into a vector in homogeneous coordinates, i.e., appends a `1` at the @@ -210,7 +210,7 @@ where DefaultAllocator: Allocator } } -impl AbsDiffEq for Point +impl AbsDiffEq for Point where DefaultAllocator: Allocator, N::Epsilon: Copy, @@ -228,7 +228,7 @@ where } } -impl RelativeEq for Point +impl RelativeEq for Point where DefaultAllocator: Allocator, N::Epsilon: Copy, @@ -251,7 +251,7 @@ where } } -impl UlpsEq for Point +impl UlpsEq for Point where DefaultAllocator: Allocator, N::Epsilon: Copy, @@ -267,9 +267,9 @@ where } } -impl Eq for Point where DefaultAllocator: Allocator {} +impl Eq for Point where DefaultAllocator: Allocator {} -impl PartialEq for Point +impl PartialEq for Point where DefaultAllocator: Allocator { #[inline] @@ -278,7 +278,7 @@ where DefaultAllocator: Allocator } } -impl PartialOrd for Point +impl PartialOrd for Point where DefaultAllocator: Allocator { #[inline] @@ -312,7 +312,7 @@ where DefaultAllocator: Allocator * Display * */ -impl fmt::Display for Point +impl fmt::Display 
for Point where DefaultAllocator: Allocator { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { diff --git a/src/geometry/point_alga.rs b/src/geometry/point_alga.rs index 162e6c68..0deb52d3 100644 --- a/src/geometry/point_alga.rs +++ b/src/geometry/point_alga.rs @@ -7,9 +7,9 @@ use crate::base::{DefaultAllocator, Scalar, VectorN}; use crate::geometry::Point; -impl AffineSpace for Point +impl AffineSpace for Point where - N: Scalar + Field, + N: Scalar + Copy + Field, DefaultAllocator: Allocator, { type Translation = VectorN; @@ -49,7 +49,7 @@ where DefaultAllocator: Allocator */ impl MeetSemilattice for Point where - N: Scalar + MeetSemilattice, + N: Scalar + Copy + MeetSemilattice, DefaultAllocator: Allocator, { #[inline] @@ -60,7 +60,7 @@ where impl JoinSemilattice for Point where - N: Scalar + JoinSemilattice, + N: Scalar + Copy + JoinSemilattice, DefaultAllocator: Allocator, { #[inline] @@ -71,7 +71,7 @@ where impl Lattice for Point where - N: Scalar + Lattice, + N: Scalar + Copy + Lattice, DefaultAllocator: Allocator, { #[inline] diff --git a/src/geometry/point_construction.rs b/src/geometry/point_construction.rs index 2fac11d4..47d0e7e8 100644 --- a/src/geometry/point_construction.rs +++ b/src/geometry/point_construction.rs @@ -12,7 +12,7 @@ use crate::base::{DefaultAllocator, Scalar, VectorN}; use crate::geometry::Point; -impl Point +impl Point where DefaultAllocator: Allocator { /// Creates a new point with uninitialized coordinates. @@ -94,7 +94,7 @@ where DefaultAllocator: Allocator #[inline] pub fn from_homogeneous(v: VectorN>) -> Option where - N: Scalar + Zero + One + ClosedDiv, + N: Scalar + Copy + Zero + One + ClosedDiv, D: DimNameAdd, DefaultAllocator: Allocator>, { @@ -112,7 +112,7 @@ where DefaultAllocator: Allocator * Traits that build points. 
* */ -impl Bounded for Point +impl Bounded for Point where DefaultAllocator: Allocator { #[inline] @@ -126,7 +126,7 @@ where DefaultAllocator: Allocator } } -impl Distribution> for Standard +impl Distribution> for Standard where DefaultAllocator: Allocator, Standard: Distribution, @@ -156,7 +156,7 @@ where */ macro_rules! componentwise_constructors_impl( ($($doc: expr; $D: ty, $($args: ident:$irow: expr),*);* $(;)*) => {$( - impl Point + impl Point where DefaultAllocator: Allocator { #[doc = "Initializes this point from its components."] #[doc = "# Example\n```"] @@ -192,7 +192,7 @@ componentwise_constructors_impl!( macro_rules! from_array_impl( ($($D: ty, $len: expr);*) => {$( - impl From<[N; $len]> for Point { + impl From<[N; $len]> for Point { fn from (coords: [N; $len]) -> Self { Self { coords: coords.into() diff --git a/src/geometry/point_conversion.rs b/src/geometry/point_conversion.rs index 10438165..b3131f52 100644 --- a/src/geometry/point_conversion.rs +++ b/src/geometry/point_conversion.rs @@ -27,8 +27,8 @@ use std::convert::{AsMut, AsRef, From, Into}; impl SubsetOf> for Point where D: DimName, - N1: Scalar, - N2: Scalar + SupersetOf, + N1: Scalar + Copy, + N2: Scalar + Copy + SupersetOf, DefaultAllocator: Allocator + Allocator, { #[inline] @@ -52,8 +52,8 @@ where impl SubsetOf>> for Point where D: DimNameAdd, - N1: Scalar, - N2: Scalar + Zero + One + ClosedDiv + SupersetOf, + N1: Scalar + Copy, + N2: Scalar + Copy + Zero + One + ClosedDiv + SupersetOf, DefaultAllocator: Allocator + Allocator> + Allocator> @@ -83,7 +83,7 @@ where macro_rules! impl_from_into_mint_1D( ($($NRows: ident => $PT:ident, $VT:ident [$SZ: expr]);* $(;)*) => {$( impl From> for Point - where N: Scalar { + where N: Scalar + Copy { #[inline] fn from(p: mint::$PT) -> Self { Self { @@ -93,7 +93,7 @@ macro_rules! 
impl_from_into_mint_1D( } impl Into> for Point - where N: Scalar { + where N: Scalar + Copy { #[inline] fn into(self) -> mint::$PT { let mint_vec: mint::$VT = self.coords.into(); @@ -102,7 +102,7 @@ macro_rules! impl_from_into_mint_1D( } impl AsRef> for Point - where N: Scalar { + where N: Scalar + Copy { #[inline] fn as_ref(&self) -> &mint::$PT { unsafe { @@ -112,7 +112,7 @@ macro_rules! impl_from_into_mint_1D( } impl AsMut> for Point - where N: Scalar { + where N: Scalar + Copy { #[inline] fn as_mut(&mut self) -> &mut mint::$PT { unsafe { @@ -130,7 +130,7 @@ impl_from_into_mint_1D!( U3 => Point3, Vector3[3]; ); -impl From> for VectorN> +impl From> for VectorN> where D: DimNameAdd, DefaultAllocator: Allocator + Allocator>, @@ -141,7 +141,7 @@ where } } -impl From> for Point +impl From> for Point where DefaultAllocator: Allocator, { diff --git a/src/geometry/point_coordinates.rs b/src/geometry/point_coordinates.rs index b56e120e..1b6edf67 100644 --- a/src/geometry/point_coordinates.rs +++ b/src/geometry/point_coordinates.rs @@ -16,7 +16,7 @@ use crate::geometry::Point; macro_rules! deref_impl( ($D: ty, $Target: ident $(, $comps: ident)*) => { - impl Deref for Point + impl Deref for Point where DefaultAllocator: Allocator { type Target = $Target; @@ -26,7 +26,7 @@ macro_rules! deref_impl( } } - impl DerefMut for Point + impl DerefMut for Point where DefaultAllocator: Allocator { #[inline] fn deref_mut(&mut self) -> &mut Self::Target { diff --git a/src/geometry/point_ops.rs b/src/geometry/point_ops.rs index b49495f8..2a4fae03 100644 --- a/src/geometry/point_ops.rs +++ b/src/geometry/point_ops.rs @@ -18,7 +18,7 @@ use crate::geometry::Point; * Indexing. * */ -impl Index for Point +impl Index for Point where DefaultAllocator: Allocator { type Output = N; @@ -29,7 +29,7 @@ where DefaultAllocator: Allocator } } -impl IndexMut for Point +impl IndexMut for Point where DefaultAllocator: Allocator { #[inline] @@ -43,7 +43,7 @@ where DefaultAllocator: Allocator * Neg. 
* */ -impl Neg for Point +impl Neg for Point where DefaultAllocator: Allocator { type Output = Self; @@ -54,7 +54,7 @@ where DefaultAllocator: Allocator } } -impl<'a, N: Scalar + ClosedNeg, D: DimName> Neg for &'a Point +impl<'a, N: Scalar + Copy + ClosedNeg, D: DimName> Neg for &'a Point where DefaultAllocator: Allocator { type Output = Point; @@ -138,7 +138,7 @@ add_sub_impl!(Add, add, ClosedAdd; macro_rules! op_assign_impl( ($($TraitAssign: ident, $method_assign: ident, $bound: ident);* $(;)*) => {$( impl<'b, N, D1: DimName, D2: Dim, SB> $TraitAssign<&'b Vector> for Point - where N: Scalar + $bound, + where N: Scalar + Copy + $bound, SB: Storage, DefaultAllocator: Allocator, ShapeConstraint: SameNumberOfRows { @@ -150,7 +150,7 @@ macro_rules! op_assign_impl( } impl $TraitAssign> for Point - where N: Scalar + $bound, + where N: Scalar + Copy + $bound, SB: Storage, DefaultAllocator: Allocator, ShapeConstraint: SameNumberOfRows { @@ -192,7 +192,7 @@ md_impl_all!( macro_rules! componentwise_scalarop_impl( ($Trait: ident, $method: ident, $bound: ident; $TraitAssign: ident, $method_assign: ident) => { - impl $Trait for Point + impl $Trait for Point where DefaultAllocator: Allocator { type Output = Point; @@ -202,7 +202,7 @@ macro_rules! componentwise_scalarop_impl( } } - impl<'a, N: Scalar + $bound, D: DimName> $Trait for &'a Point + impl<'a, N: Scalar + Copy + $bound, D: DimName> $Trait for &'a Point where DefaultAllocator: Allocator { type Output = Point; @@ -212,7 +212,7 @@ macro_rules! componentwise_scalarop_impl( } } - impl $TraitAssign for Point + impl $TraitAssign for Point where DefaultAllocator: Allocator { #[inline] fn $method_assign(&mut self, right: N) { diff --git a/src/geometry/reflection.rs b/src/geometry/reflection.rs index b4658a11..9b4da872 100644 --- a/src/geometry/reflection.rs +++ b/src/geometry/reflection.rs @@ -8,7 +8,7 @@ use crate::storage::{Storage, StorageMut}; use crate::geometry::Point; /// A reflection wrt. a plane. 
-pub struct Reflection> { +pub struct Reflection> { axis: Vector, bias: N, } diff --git a/src/geometry/rotation.rs b/src/geometry/rotation.rs index ec9c8150..0081262b 100755 --- a/src/geometry/rotation.rs +++ b/src/geometry/rotation.rs @@ -24,13 +24,13 @@ use crate::geometry::Point; /// A rotation matrix. #[repr(C)] #[derive(Debug)] -pub struct Rotation +pub struct Rotation where DefaultAllocator: Allocator { matrix: MatrixN, } -impl hash::Hash for Rotation +impl hash::Hash for Rotation where DefaultAllocator: Allocator, >::Buffer: hash::Hash, @@ -40,14 +40,14 @@ where } } -impl Copy for Rotation +impl Copy for Rotation where DefaultAllocator: Allocator, >::Buffer: Copy, { } -impl Clone for Rotation +impl Clone for Rotation where DefaultAllocator: Allocator, >::Buffer: Clone, @@ -61,7 +61,7 @@ where #[cfg(feature = "abomonation-serialize")] impl Abomonation for Rotation where - N: Scalar, + N: Scalar + Copy, D: DimName, MatrixN: Abomonation, DefaultAllocator: Allocator, @@ -80,7 +80,7 @@ where } #[cfg(feature = "serde-serialize")] -impl Serialize for Rotation +impl Serialize for Rotation where DefaultAllocator: Allocator, Owned: Serialize, @@ -92,7 +92,7 @@ where } #[cfg(feature = "serde-serialize")] -impl<'a, N: Scalar, D: DimName> Deserialize<'a> for Rotation +impl<'a, N: Scalar + Copy, D: DimName> Deserialize<'a> for Rotation where DefaultAllocator: Allocator, Owned: Deserialize<'a>, @@ -105,7 +105,7 @@ where } } -impl Rotation +impl Rotation where DefaultAllocator: Allocator { /// A reference to the underlying matrix representation of this rotation. 
@@ -432,9 +432,9 @@ where DefaultAllocator: Allocator + Allocator } } -impl Eq for Rotation where DefaultAllocator: Allocator {} +impl Eq for Rotation where DefaultAllocator: Allocator {} -impl PartialEq for Rotation +impl PartialEq for Rotation where DefaultAllocator: Allocator { #[inline] @@ -445,7 +445,7 @@ where DefaultAllocator: Allocator impl AbsDiffEq for Rotation where - N: Scalar + AbsDiffEq, + N: Scalar + Copy + AbsDiffEq, DefaultAllocator: Allocator, N::Epsilon: Copy, { @@ -464,7 +464,7 @@ where impl RelativeEq for Rotation where - N: Scalar + RelativeEq, + N: Scalar + Copy + RelativeEq, DefaultAllocator: Allocator, N::Epsilon: Copy, { @@ -488,7 +488,7 @@ where impl UlpsEq for Rotation where - N: Scalar + UlpsEq, + N: Scalar + Copy + UlpsEq, DefaultAllocator: Allocator, N::Epsilon: Copy, { diff --git a/src/geometry/rotation_construction.rs b/src/geometry/rotation_construction.rs index a7779cc6..3e9b2930 100644 --- a/src/geometry/rotation_construction.rs +++ b/src/geometry/rotation_construction.rs @@ -10,7 +10,7 @@ use crate::geometry::Rotation; impl Rotation where - N: Scalar + Zero + One, + N: Scalar + Copy + Zero + One, DefaultAllocator: Allocator, { /// Creates a new square identity rotation of the given `dimension`. 
@@ -32,7 +32,7 @@ where impl One for Rotation where - N: Scalar + Zero + One + ClosedAdd + ClosedMul, + N: Scalar + Copy + Zero + One + ClosedAdd + ClosedMul, DefaultAllocator: Allocator, { #[inline] diff --git a/src/geometry/rotation_ops.rs b/src/geometry/rotation_ops.rs index ed555b6b..8b0810a1 100644 --- a/src/geometry/rotation_ops.rs +++ b/src/geometry/rotation_ops.rs @@ -30,7 +30,7 @@ use crate::base::{DefaultAllocator, Matrix, MatrixMN, Scalar, Unit, Vector, Vect use crate::geometry::{Point, Rotation}; -impl Index<(usize, usize)> for Rotation +impl Index<(usize, usize)> for Rotation where DefaultAllocator: Allocator { type Output = N; diff --git a/src/geometry/swizzle.rs b/src/geometry/swizzle.rs index d5740016..fdcb4743 100644 --- a/src/geometry/swizzle.rs +++ b/src/geometry/swizzle.rs @@ -6,7 +6,7 @@ use typenum::{self, Cmp, Greater}; macro_rules! impl_swizzle { ($( where $BaseDim: ident: $( $name: ident() -> $Result: ident[$($i: expr),+] ),+ ;)* ) => { $( - impl Point + impl Point where DefaultAllocator: Allocator, D::Value: Cmp diff --git a/src/geometry/translation.rs b/src/geometry/translation.rs index e64b3d2e..0e9b37fc 100755 --- a/src/geometry/translation.rs +++ b/src/geometry/translation.rs @@ -23,7 +23,7 @@ use crate::geometry::Point; /// A translation. 
#[repr(C)] #[derive(Debug)] -pub struct Translation +pub struct Translation where DefaultAllocator: Allocator { /// The translation coordinates, i.e., how much is added to a point's coordinates when it is @@ -31,7 +31,7 @@ where DefaultAllocator: Allocator pub vector: VectorN, } -impl hash::Hash for Translation +impl hash::Hash for Translation where DefaultAllocator: Allocator, Owned: hash::Hash, @@ -41,13 +41,13 @@ where } } -impl Copy for Translation +impl Copy for Translation where DefaultAllocator: Allocator, Owned: Copy, {} -impl Clone for Translation +impl Clone for Translation where DefaultAllocator: Allocator, Owned: Clone, @@ -61,7 +61,7 @@ where #[cfg(feature = "abomonation-serialize")] impl Abomonation for Translation where - N: Scalar, + N: Scalar + Copy, D: DimName, VectorN: Abomonation, DefaultAllocator: Allocator, @@ -80,7 +80,7 @@ where } #[cfg(feature = "serde-serialize")] -impl Serialize for Translation +impl Serialize for Translation where DefaultAllocator: Allocator, Owned: Serialize, @@ -92,7 +92,7 @@ where } #[cfg(feature = "serde-serialize")] -impl<'a, N: Scalar, D: DimName> Deserialize<'a> for Translation +impl<'a, N: Scalar + Copy, D: DimName> Deserialize<'a> for Translation where DefaultAllocator: Allocator, Owned: Deserialize<'a>, @@ -105,7 +105,7 @@ where } } -impl Translation +impl Translation where DefaultAllocator: Allocator { /// Creates a new translation from the given vector. @@ -192,7 +192,7 @@ where DefaultAllocator: Allocator } } -impl Translation +impl Translation where DefaultAllocator: Allocator { /// Translate the given point. @@ -211,7 +211,7 @@ where DefaultAllocator: Allocator } } -impl Translation +impl Translation where DefaultAllocator: Allocator { /// Translate the given point by the inverse of this translation. 
@@ -228,9 +228,9 @@ where DefaultAllocator: Allocator } } -impl Eq for Translation where DefaultAllocator: Allocator {} +impl Eq for Translation where DefaultAllocator: Allocator {} -impl PartialEq for Translation +impl PartialEq for Translation where DefaultAllocator: Allocator { #[inline] @@ -239,7 +239,7 @@ where DefaultAllocator: Allocator } } -impl AbsDiffEq for Translation +impl AbsDiffEq for Translation where DefaultAllocator: Allocator, N::Epsilon: Copy, @@ -257,7 +257,7 @@ where } } -impl RelativeEq for Translation +impl RelativeEq for Translation where DefaultAllocator: Allocator, N::Epsilon: Copy, @@ -280,7 +280,7 @@ where } } -impl UlpsEq for Translation +impl UlpsEq for Translation where DefaultAllocator: Allocator, N::Epsilon: Copy, diff --git a/src/geometry/translation_construction.rs b/src/geometry/translation_construction.rs index 339bdd2a..266b4b3b 100644 --- a/src/geometry/translation_construction.rs +++ b/src/geometry/translation_construction.rs @@ -15,7 +15,7 @@ use crate::base::{DefaultAllocator, Scalar, VectorN}; use crate::geometry::Translation; -impl Translation +impl Translation where DefaultAllocator: Allocator { /// Creates a new identity translation. @@ -38,7 +38,7 @@ where DefaultAllocator: Allocator } } -impl One for Translation +impl One for Translation where DefaultAllocator: Allocator { #[inline] @@ -47,7 +47,7 @@ where DefaultAllocator: Allocator } } -impl Distribution> for Standard +impl Distribution> for Standard where DefaultAllocator: Allocator, Standard: Distribution, @@ -78,7 +78,7 @@ where */ macro_rules! 
componentwise_constructors_impl( ($($doc: expr; $D: ty, $($args: ident:$irow: expr),*);* $(;)*) => {$( - impl Translation + impl Translation where DefaultAllocator: Allocator { #[doc = "Initializes this translation from its components."] #[doc = "# Example\n```"] diff --git a/src/geometry/translation_conversion.rs b/src/geometry/translation_conversion.rs index b44412e6..2a6d9535 100644 --- a/src/geometry/translation_conversion.rs +++ b/src/geometry/translation_conversion.rs @@ -22,8 +22,8 @@ use crate::geometry::{Isometry, Point, Similarity, SuperTCategoryOf, TAffine, Tr impl SubsetOf> for Translation where - N1: Scalar, - N2: Scalar + SupersetOf, + N1: Scalar + Copy, + N2: Scalar + Copy + SupersetOf, DefaultAllocator: Allocator + Allocator, { #[inline] @@ -153,7 +153,7 @@ where } } -impl From> for MatrixN> +impl From> for MatrixN> where D: DimNameAdd, DefaultAllocator: Allocator + Allocator, DimNameSum>, @@ -164,7 +164,7 @@ where } } -impl From> for Translation +impl From> for Translation where DefaultAllocator: Allocator { #[inline] diff --git a/src/geometry/translation_coordinates.rs b/src/geometry/translation_coordinates.rs index c422415c..97eb5b32 100644 --- a/src/geometry/translation_coordinates.rs +++ b/src/geometry/translation_coordinates.rs @@ -16,7 +16,7 @@ use crate::geometry::Translation; macro_rules! deref_impl( ($D: ty, $Target: ident $(, $comps: ident)*) => { - impl Deref for Translation + impl Deref for Translation where DefaultAllocator: Allocator { type Target = $Target; @@ -26,7 +26,7 @@ macro_rules! deref_impl( } } - impl DerefMut for Translation + impl DerefMut for Translation where DefaultAllocator: Allocator { #[inline] fn deref_mut(&mut self) -> &mut Self::Target { diff --git a/src/linalg/lu.rs b/src/linalg/lu.rs index 2c3beee3..9676cd83 100644 --- a/src/linalg/lu.rs +++ b/src/linalg/lu.rs @@ -318,7 +318,7 @@ where DefaultAllocator: Allocator + Allocator<(usize, usize), D> /// element `matrix[(i, i)]` is provided as argument. 
pub fn gauss_step(matrix: &mut Matrix, diag: N, i: usize) where - N: Scalar + Field, + N: Scalar + Copy + Field, S: StorageMut, { let mut submat = matrix.slice_range_mut(i.., i..); @@ -346,7 +346,7 @@ pub fn gauss_step_swap( i: usize, piv: usize, ) where - N: Scalar + Field, + N: Scalar + Copy + Field, S: StorageMut, { let piv = piv - i; diff --git a/src/linalg/permutation_sequence.rs b/src/linalg/permutation_sequence.rs index ce493905..c7fb9b2b 100644 --- a/src/linalg/permutation_sequence.rs +++ b/src/linalg/permutation_sequence.rs @@ -92,7 +92,7 @@ where DefaultAllocator: Allocator<(usize, usize), D> /// Applies this sequence of permutations to the rows of `rhs`. #[inline] - pub fn permute_rows(&self, rhs: &mut Matrix) + pub fn permute_rows(&self, rhs: &mut Matrix) where S2: StorageMut { for i in self.ipiv.rows_range(..self.len).iter() { rhs.swap_rows(i.0, i.1) @@ -101,7 +101,7 @@ where DefaultAllocator: Allocator<(usize, usize), D> /// Applies this sequence of permutations in reverse to the rows of `rhs`. #[inline] - pub fn inv_permute_rows( + pub fn inv_permute_rows( &self, rhs: &mut Matrix, ) where @@ -115,7 +115,7 @@ where DefaultAllocator: Allocator<(usize, usize), D> /// Applies this sequence of permutations to the columns of `rhs`. #[inline] - pub fn permute_columns( + pub fn permute_columns( &self, rhs: &mut Matrix, ) where @@ -128,7 +128,7 @@ where DefaultAllocator: Allocator<(usize, usize), D> /// Applies this sequence of permutations in reverse to the columns of `rhs`. #[inline] - pub fn inv_permute_columns( + pub fn inv_permute_columns( &self, rhs: &mut Matrix, ) where diff --git a/src/sparse/cs_matrix.rs b/src/sparse/cs_matrix.rs index 2fc571c7..e8c259a2 100644 --- a/src/sparse/cs_matrix.rs +++ b/src/sparse/cs_matrix.rs @@ -105,7 +105,7 @@ pub trait CsStorageMut: /// A storage of column-compressed sparse matrix based on a Vec. 
#[derive(Clone, Debug, PartialEq)] -pub struct CsVecStorage +pub struct CsVecStorage where DefaultAllocator: Allocator { pub(crate) shape: (R, C), @@ -114,7 +114,7 @@ where DefaultAllocator: Allocator pub(crate) vals: Vec, } -impl CsVecStorage +impl CsVecStorage where DefaultAllocator: Allocator { /// The value buffer of this storage. @@ -133,9 +133,9 @@ where DefaultAllocator: Allocator } } -impl CsVecStorage where DefaultAllocator: Allocator {} +impl CsVecStorage where DefaultAllocator: Allocator {} -impl<'a, N: Scalar, R: Dim, C: Dim> CsStorageIter<'a, N, R, C> for CsVecStorage +impl<'a, N: Scalar + Copy, R: Dim, C: Dim> CsStorageIter<'a, N, R, C> for CsVecStorage where DefaultAllocator: Allocator { type ColumnEntries = ColumnEntries<'a, N>; @@ -154,7 +154,7 @@ where DefaultAllocator: Allocator } } -impl CsStorage for CsVecStorage +impl CsStorage for CsVecStorage where DefaultAllocator: Allocator { #[inline] @@ -199,7 +199,7 @@ where DefaultAllocator: Allocator } } -impl<'a, N: Scalar, R: Dim, C: Dim> CsStorageIterMut<'a, N, R, C> for CsVecStorage +impl<'a, N: Scalar + Copy, R: Dim, C: Dim> CsStorageIterMut<'a, N, R, C> for CsVecStorage where DefaultAllocator: Allocator { type ValuesMut = slice::IterMut<'a, N>; @@ -220,11 +220,11 @@ where DefaultAllocator: Allocator } } -impl CsStorageMut for CsVecStorage where DefaultAllocator: Allocator +impl CsStorageMut for CsVecStorage where DefaultAllocator: Allocator {} /* -pub struct CsSliceStorage<'a, N: Scalar, R: Dim, C: DimAdd> { +pub struct CsSliceStorage<'a, N: Scalar + Copy, R: Dim, C: DimAdd> { shape: (R, C), p: VectorSlice>, i: VectorSlice, @@ -234,7 +234,7 @@ pub struct CsSliceStorage<'a, N: Scalar, R: Dim, C: DimAdd> { /// A compressed sparse column matrix. #[derive(Clone, Debug, PartialEq)] pub struct CsMatrix< - N: Scalar, + N: Scalar + Copy, R: Dim = Dynamic, C: Dim = Dynamic, S: CsStorage = CsVecStorage, @@ -246,7 +246,7 @@ pub struct CsMatrix< /// A column compressed sparse vector. 
pub type CsVector> = CsMatrix; -impl CsMatrix +impl CsMatrix where DefaultAllocator: Allocator { /// Creates a new compressed sparse column matrix with the specified dimension and @@ -323,7 +323,7 @@ where DefaultAllocator: Allocator } /* -impl CsMatrix { +impl CsMatrix { pub(crate) fn from_parts( nrows: usize, ncols: usize, @@ -340,7 +340,7 @@ impl CsMatrix { } */ -impl> CsMatrix { +impl> CsMatrix { pub(crate) fn from_data(data: S) -> Self { CsMatrix { data, @@ -433,7 +433,7 @@ impl> CsMatrix { } } -impl> CsMatrix { +impl> CsMatrix { /// Iterator through all the mutable values of this sparse matrix. #[inline] pub fn values_mut(&mut self) -> impl Iterator { @@ -441,7 +441,7 @@ impl> CsMatrix { } } -impl CsMatrix +impl CsMatrix where DefaultAllocator: Allocator { pub(crate) fn sort(&mut self) diff --git a/src/sparse/cs_matrix_conversion.rs b/src/sparse/cs_matrix_conversion.rs index 31e53796..251fa282 100644 --- a/src/sparse/cs_matrix_conversion.rs +++ b/src/sparse/cs_matrix_conversion.rs @@ -7,7 +7,7 @@ use crate::sparse::{CsMatrix, CsStorage}; use crate::storage::Storage; use crate::{DefaultAllocator, Dim, Dynamic, Matrix, MatrixMN, Scalar}; -impl<'a, N: Scalar + Zero + ClosedAdd> CsMatrix { +impl<'a, N: Scalar + Copy + Zero + ClosedAdd> CsMatrix { /// Creates a column-compressed sparse matrix from a sparse matrix in triplet form. pub fn from_triplet( nrows: usize, @@ -21,7 +21,7 @@ impl<'a, N: Scalar + Zero + ClosedAdd> CsMatrix { } } -impl<'a, N: Scalar + Zero + ClosedAdd, R: Dim, C: Dim> CsMatrix +impl<'a, N: Scalar + Copy + Zero + ClosedAdd, R: Dim, C: Dim> CsMatrix where DefaultAllocator: Allocator + Allocator { /// Creates a column-compressed sparse matrix from a sparse matrix in triplet form. 
@@ -66,7 +66,7 @@ where DefaultAllocator: Allocator + Allocator } } -impl<'a, N: Scalar + Zero, R: Dim, C: Dim, S> From> for MatrixMN +impl<'a, N: Scalar + Copy + Zero, R: Dim, C: Dim, S> From> for MatrixMN where S: CsStorage, DefaultAllocator: Allocator, @@ -85,7 +85,7 @@ where } } -impl<'a, N: Scalar + Zero, R: Dim, C: Dim, S> From> for CsMatrix +impl<'a, N: Scalar + Copy + Zero, R: Dim, C: Dim, S> From> for CsMatrix where S: Storage, DefaultAllocator: Allocator + Allocator, diff --git a/src/sparse/cs_matrix_ops.rs b/src/sparse/cs_matrix_ops.rs index 322ebb34..9e827e3c 100644 --- a/src/sparse/cs_matrix_ops.rs +++ b/src/sparse/cs_matrix_ops.rs @@ -8,7 +8,7 @@ use crate::sparse::{CsMatrix, CsStorage, CsStorageMut, CsVector}; use crate::storage::StorageMut; use crate::{DefaultAllocator, Dim, Scalar, Vector, VectorN, U1}; -impl> CsMatrix { +impl> CsMatrix { fn scatter( &self, j: usize, @@ -39,7 +39,7 @@ impl> CsMatrix { } /* -impl CsVector { +impl CsVector { pub fn axpy(&mut self, alpha: N, x: CsVector, beta: N) { // First, compute the number of non-zero entries. let mut nnzero = 0; @@ -76,7 +76,7 @@ impl CsVector { } */ -impl> Vector { +impl> Vector { /// Perform a sparse axpy operation: `self = alpha * x + beta * self` operation. 
pub fn axpy_cs(&mut self, alpha: N, x: &CsVector, beta: N) where @@ -126,7 +126,7 @@ impl> Vect impl<'a, 'b, N, R1, R2, C1, C2, S1, S2> Mul<&'b CsMatrix> for &'a CsMatrix where - N: Scalar + ClosedAdd + ClosedMul + Zero, + N: Scalar + Copy + ClosedAdd + ClosedMul + Zero, R1: Dim, C1: Dim, R2: Dim, @@ -219,7 +219,7 @@ where impl<'a, 'b, N, R1, R2, C1, C2, S1, S2> Add<&'b CsMatrix> for &'a CsMatrix where - N: Scalar + ClosedAdd + ClosedMul + One, + N: Scalar + Copy + ClosedAdd + ClosedMul + One, R1: Dim, C1: Dim, R2: Dim, @@ -287,7 +287,7 @@ where impl<'a, 'b, N, R, C, S> Mul for CsMatrix where - N: Scalar + ClosedAdd + ClosedMul + Zero, + N: Scalar + Copy + ClosedAdd + ClosedMul + Zero, R: Dim, C: Dim, S: CsStorageMut, From e981283500d35e580326a7422918269ad5734ffc Mon Sep 17 00:00:00 2001 From: Aaron Hill Date: Tue, 19 Nov 2019 17:42:33 -0500 Subject: [PATCH 39/67] Switch to `wrapping_offset` instead of unsafe `offset` --- src/base/storage.rs | 22 ++-------------------- 1 file changed, 2 insertions(+), 20 deletions(-) diff --git a/src/base/storage.rs b/src/base/storage.rs index 80a6a2d8..e7439552 100644 --- a/src/base/storage.rs +++ b/src/base/storage.rs @@ -72,16 +72,7 @@ pub unsafe trait Storage: Debug + Sized { /// Gets the address of the i-th matrix component without performing bound-checking. #[inline] unsafe fn get_address_unchecked_linear(&self, i: usize) -> *const N { - let shape = self.shape(); - if shape.0.value() * shape.1.value() == 0 { - // If we have a zero-size matrix, our pointer must - // be dangling. Instead of calling 'offset', we - // just re-use our pointer, since actually using - // it would be undefined behavior - self.ptr() - } else { - self.ptr().offset(i as isize) - } + self.ptr().wrapping_offset(i as isize) } /// Gets the address of the i-th matrix component without performing bound-checking. 
@@ -133,16 +124,7 @@ pub unsafe trait StorageMut: Storage { /// Gets the mutable address of the i-th matrix component without performing bound-checking. #[inline] unsafe fn get_address_unchecked_linear_mut(&mut self, i: usize) -> *mut N { - let shape = self.shape(); - if shape.0.value() * shape.1.value() == 0 { - // If we have a zero-size matrix, our pointer must - // be dangling. Instead of calling 'offset', we - // just re-use our pointer, since actually using - // it would be undefined behavior - self.ptr_mut() - } else { - self.ptr_mut().offset(i as isize) - } + self.ptr_mut().wrapping_offset(i as isize) } /// Gets the mutable address of the i-th matrix component without performing bound-checking. From 52aac8b9756815ec0d190c36b3af1dbeb51b99e4 Mon Sep 17 00:00:00 2001 From: Avi Weinstock Date: Thu, 5 Dec 2019 17:54:17 -0500 Subject: [PATCH 40/67] Add inlined_clone to Scalar, and relax bounds from `Scalar + Copy` to `Scalar + Clone` nearly everywhere. The various nalgebra-lapack FooScalars are still Copy because they make use of uninitialized memory. nalgebgra-glm Number still uses Copy because upstream `approx` requires it. 
--- examples/scalar_genericity.rs | 4 +- nalgebra-glm/src/common.rs | 6 +- nalgebra-glm/src/constructors.rs | 32 ++-- nalgebra-glm/src/gtc/bitfield.rs | 10 +- nalgebra-glm/src/gtc/integer.rs | 4 +- nalgebra-glm/src/gtc/matrix_access.rs | 8 +- nalgebra-glm/src/gtc/packing.rs | 6 +- nalgebra-glm/src/gtc/round.rs | 18 +-- nalgebra-glm/src/gtc/type_ptr.rs | 94 +++++------ nalgebra-glm/src/gtc/ulp.rs | 2 +- nalgebra-glm/src/integer.rs | 20 +-- nalgebra-glm/src/matrix.rs | 2 +- nalgebra-glm/src/packing.rs | 24 +-- nalgebra-glm/src/traits.rs | 4 +- nalgebra-lapack/src/cholesky.rs | 2 +- nalgebra-lapack/src/eigen.rs | 4 +- nalgebra-lapack/src/hessenberg.rs | 2 +- nalgebra-lapack/src/lu.rs | 2 +- nalgebra-lapack/src/qr.rs | 2 +- nalgebra-lapack/src/schur.rs | 4 +- nalgebra-lapack/src/svd.rs | 4 +- nalgebra-lapack/src/symmetric_eigen.rs | 4 +- src/base/allocator.rs | 12 +- src/base/array_storage.rs | 16 +- src/base/blas.rs | 110 ++++++------- src/base/cg.rs | 14 +- src/base/componentwise.rs | 16 +- src/base/construction.rs | 36 ++--- src/base/construction_slice.rs | 16 +- src/base/conversion.rs | 54 +++---- src/base/coordinates.rs | 6 +- src/base/default_allocator.rs | 20 +-- src/base/edition.rs | 46 +++--- src/base/indexing.rs | 18 +-- src/base/iter.rs | 40 ++--- src/base/matrix.rs | 192 +++++++++++------------ src/base/matrix_alga.rs | 28 ++-- src/base/matrix_slice.rs | 40 ++--- src/base/ops.rs | 92 +++++------ src/base/properties.rs | 2 +- src/base/scalar.rs | 14 +- src/base/statistics.rs | 15 +- src/base/storage.rs | 8 +- src/base/swizzle.rs | 4 +- src/base/vec_storage.rs | 20 +-- src/debug/random_orthogonal.rs | 2 +- src/debug/random_sdp.rs | 2 +- src/geometry/op_macros.rs | 8 +- src/geometry/perspective.rs | 2 +- src/geometry/point.rs | 26 +-- src/geometry/point_alga.rs | 10 +- src/geometry/point_construction.rs | 16 +- src/geometry/point_conversion.rs | 22 +-- src/geometry/point_coordinates.rs | 4 +- src/geometry/point_ops.rs | 18 +-- src/geometry/reflection.rs | 2 +- 
src/geometry/rotation.rs | 24 +-- src/geometry/rotation_construction.rs | 4 +- src/geometry/rotation_ops.rs | 2 +- src/geometry/swizzle.rs | 4 +- src/geometry/translation.rs | 28 ++-- src/geometry/translation_construction.rs | 10 +- src/geometry/translation_conversion.rs | 8 +- src/geometry/translation_coordinates.rs | 4 +- src/linalg/lu.rs | 8 +- src/linalg/permutation_sequence.rs | 8 +- src/sparse/cs_matrix.rs | 40 ++--- src/sparse/cs_matrix_conversion.rs | 10 +- src/sparse/cs_matrix_ops.rs | 30 ++-- 69 files changed, 692 insertions(+), 677 deletions(-) diff --git a/examples/scalar_genericity.rs b/examples/scalar_genericity.rs index 18b5c52a..c1d363f7 100644 --- a/examples/scalar_genericity.rs +++ b/examples/scalar_genericity.rs @@ -4,11 +4,11 @@ extern crate nalgebra as na; use alga::general::{RealField, RingCommutative}; use na::{Scalar, Vector3}; -fn print_vector(m: &Vector3) { +fn print_vector(m: &Vector3) { println!("{:?}", m) } -fn print_squared_norm(v: &Vector3) { +fn print_squared_norm(v: &Vector3) { // NOTE: alternatively, nalgebra already defines `v.squared_norm()`. 
let sqnorm = v.dot(v); println!("{:?}", sqnorm); diff --git a/nalgebra-glm/src/common.rs b/nalgebra-glm/src/common.rs index 5cebbc3d..42f26d30 100644 --- a/nalgebra-glm/src/common.rs +++ b/nalgebra-glm/src/common.rs @@ -297,13 +297,13 @@ where DefaultAllocator: Alloc { v.map(int_bits_to_float) } -//pub fn isinf(x: &TVec) -> TVec +//pub fn isinf(x: &TVec) -> TVec // where DefaultAllocator: Alloc { // unimplemented!() // //} // -//pub fn isnan(x: &TVec) -> TVec +//pub fn isnan(x: &TVec) -> TVec // where DefaultAllocator: Alloc { // unimplemented!() // @@ -504,7 +504,7 @@ where DefaultAllocator: Alloc { x.map(|x| x.round()) } -//pub fn roundEven(x: &TVec) -> TVec +//pub fn roundEven(x: &TVec) -> TVec // where DefaultAllocator: Alloc { // unimplemented!() //} diff --git a/nalgebra-glm/src/constructors.rs b/nalgebra-glm/src/constructors.rs index 93d06e45..020bbcb4 100644 --- a/nalgebra-glm/src/constructors.rs +++ b/nalgebra-glm/src/constructors.rs @@ -15,28 +15,28 @@ use crate::aliases::{TMat, Qua, TVec1, TVec2, TVec3, TVec4, TMat2, TMat2x3, TMat /// # use nalgebra_glm as glm; /// let v = glm::vec1(true); /// ``` -pub fn vec1(x: N) -> TVec1 { +pub fn vec1(x: N) -> TVec1 { TVec1::new(x) } /// Creates a new 2D vector. -pub fn vec2(x: N, y: N) -> TVec2 { +pub fn vec2(x: N, y: N) -> TVec2 { TVec2::new(x, y) } /// Creates a new 3D vector. -pub fn vec3(x: N, y: N, z: N) -> TVec3 { +pub fn vec3(x: N, y: N, z: N) -> TVec3 { TVec3::new(x, y, z) } /// Creates a new 4D vector. -pub fn vec4(x: N, y: N, z: N, w: N) -> TVec4 { +pub fn vec4(x: N, y: N, z: N, w: N) -> TVec4 { TVec4::new(x, y, z, w) } /// Create a new 2x2 matrix. -pub fn mat2(m11: N, m12: N, +pub fn mat2(m11: N, m12: N, m21: N, m22: N) -> TMat2 { TMat::::new( m11, m12, @@ -45,7 +45,7 @@ pub fn mat2(m11: N, m12: N, } /// Create a new 2x2 matrix. 
-pub fn mat2x2(m11: N, m12: N, +pub fn mat2x2(m11: N, m12: N, m21: N, m22: N) -> TMat2 { TMat::::new( m11, m12, @@ -54,7 +54,7 @@ pub fn mat2x2(m11: N, m12: N, } /// Create a new 2x3 matrix. -pub fn mat2x3(m11: N, m12: N, m13: N, +pub fn mat2x3(m11: N, m12: N, m13: N, m21: N, m22: N, m23: N) -> TMat2x3 { TMat::::new( m11, m12, m13, @@ -63,7 +63,7 @@ pub fn mat2x3(m11: N, m12: N, m13: N, } /// Create a new 2x4 matrix. -pub fn mat2x4(m11: N, m12: N, m13: N, m14: N, +pub fn mat2x4(m11: N, m12: N, m13: N, m14: N, m21: N, m22: N, m23: N, m24: N) -> TMat2x4 { TMat::::new( m11, m12, m13, m14, @@ -72,7 +72,7 @@ pub fn mat2x4(m11: N, m12: N, m13: N, m14: N, } /// Create a new 3x3 matrix. -pub fn mat3(m11: N, m12: N, m13: N, +pub fn mat3(m11: N, m12: N, m13: N, m21: N, m22: N, m23: N, m31: N, m32: N, m33: N) -> TMat3 { TMat::::new( @@ -83,7 +83,7 @@ pub fn mat3(m11: N, m12: N, m13: N, } /// Create a new 3x2 matrix. -pub fn mat3x2(m11: N, m12: N, +pub fn mat3x2(m11: N, m12: N, m21: N, m22: N, m31: N, m32: N) -> TMat3x2 { TMat::::new( @@ -94,7 +94,7 @@ pub fn mat3x2(m11: N, m12: N, } /// Create a new 3x3 matrix. -pub fn mat3x3(m11: N, m12: N, m13: N, +pub fn mat3x3(m11: N, m12: N, m13: N, m21: N, m22: N, m23: N, m31: N, m32: N, m33: N) -> TMat3 { TMat::::new( @@ -105,7 +105,7 @@ pub fn mat3x3(m11: N, m12: N, m13: N, } /// Create a new 3x4 matrix. -pub fn mat3x4(m11: N, m12: N, m13: N, m14: N, +pub fn mat3x4(m11: N, m12: N, m13: N, m14: N, m21: N, m22: N, m23: N, m24: N, m31: N, m32: N, m33: N, m34: N) -> TMat3x4 { TMat::::new( @@ -116,7 +116,7 @@ pub fn mat3x4(m11: N, m12: N, m13: N, m14: N, } /// Create a new 4x2 matrix. -pub fn mat4x2(m11: N, m12: N, +pub fn mat4x2(m11: N, m12: N, m21: N, m22: N, m31: N, m32: N, m41: N, m42: N) -> TMat4x2 { @@ -129,7 +129,7 @@ pub fn mat4x2(m11: N, m12: N, } /// Create a new 4x3 matrix. 
-pub fn mat4x3(m11: N, m12: N, m13: N, +pub fn mat4x3(m11: N, m12: N, m13: N, m21: N, m22: N, m23: N, m31: N, m32: N, m33: N, m41: N, m42: N, m43: N) -> TMat4x3 { @@ -142,7 +142,7 @@ pub fn mat4x3(m11: N, m12: N, m13: N, } /// Create a new 4x4 matrix. -pub fn mat4x4(m11: N, m12: N, m13: N, m14: N, +pub fn mat4x4(m11: N, m12: N, m13: N, m14: N, m21: N, m22: N, m23: N, m24: N, m31: N, m32: N, m33: N, m34: N, m41: N, m42: N, m43: N, m44: N) -> TMat4 { @@ -155,7 +155,7 @@ pub fn mat4x4(m11: N, m12: N, m13: N, m14: N, } /// Create a new 4x4 matrix. -pub fn mat4(m11: N, m12: N, m13: N, m14: N, +pub fn mat4(m11: N, m12: N, m13: N, m14: N, m21: N, m22: N, m23: N, m24: N, m31: N, m32: N, m33: N, m34: N, m41: N, m42: N, m43: N, m44: N) -> TMat4 { diff --git a/nalgebra-glm/src/gtc/bitfield.rs b/nalgebra-glm/src/gtc/bitfield.rs index 5e4f7063..81a1646f 100644 --- a/nalgebra-glm/src/gtc/bitfield.rs +++ b/nalgebra-glm/src/gtc/bitfield.rs @@ -19,7 +19,7 @@ pub fn bitfieldFillOne(Value: IU, FirstBit: i32, BitCount: i32) -> IU { unimplemented!() } -pub fn bitfieldFillOne2(Value: &TVec, FirstBit: i32, BitCount: i32) -> TVec +pub fn bitfieldFillOne2(Value: &TVec, FirstBit: i32, BitCount: i32) -> TVec where DefaultAllocator: Alloc { unimplemented!() } @@ -28,7 +28,7 @@ pub fn bitfieldFillZero(Value: IU, FirstBit: i32, BitCount: i32) -> IU { unimplemented!() } -pub fn bitfieldFillZero2(Value: &TVec, FirstBit: i32, BitCount: i32) -> TVec +pub fn bitfieldFillZero2(Value: &TVec, FirstBit: i32, BitCount: i32) -> TVec where DefaultAllocator: Alloc { unimplemented!() } @@ -113,7 +113,7 @@ pub fn bitfieldRotateLeft(In: IU, Shift: i32) -> IU { unimplemented!() } -pub fn bitfieldRotateLeft2(In: &TVec, Shift: i32) -> TVec +pub fn bitfieldRotateLeft2(In: &TVec, Shift: i32) -> TVec where DefaultAllocator: Alloc { unimplemented!() } @@ -122,7 +122,7 @@ pub fn bitfieldRotateRight(In: IU, Shift: i32) -> IU { unimplemented!() } -pub fn bitfieldRotateRight2(In: &TVec, Shift: i32) -> TVec +pub fn 
bitfieldRotateRight2(In: &TVec, Shift: i32) -> TVec where DefaultAllocator: Alloc { unimplemented!() } @@ -131,7 +131,7 @@ pub fn mask(Bits: IU) -> IU { unimplemented!() } -pub fn mask2(v: &TVec) -> TVec +pub fn mask2(v: &TVec) -> TVec where DefaultAllocator: Alloc { unimplemented!() } diff --git a/nalgebra-glm/src/gtc/integer.rs b/nalgebra-glm/src/gtc/integer.rs index 146b874c..d5965f38 100644 --- a/nalgebra-glm/src/gtc/integer.rs +++ b/nalgebra-glm/src/gtc/integer.rs @@ -3,7 +3,7 @@ //use crate::traits::{Alloc, Dimension}; //use crate::aliases::TVec; -//pub fn iround(x: &TVec) -> TVec +//pub fn iround(x: &TVec) -> TVec // where DefaultAllocator: Alloc { // x.map(|x| x.round()) //} @@ -12,7 +12,7 @@ // unimplemented!() //} // -//pub fn uround(x: &TVec) -> TVec +//pub fn uround(x: &TVec) -> TVec // where DefaultAllocator: Alloc { // unimplemented!() //} diff --git a/nalgebra-glm/src/gtc/matrix_access.rs b/nalgebra-glm/src/gtc/matrix_access.rs index 2f9bb5fa..bfc0199b 100644 --- a/nalgebra-glm/src/gtc/matrix_access.rs +++ b/nalgebra-glm/src/gtc/matrix_access.rs @@ -10,7 +10,7 @@ use crate::traits::{Alloc, Dimension}; /// * [`row`](fn.row.html) /// * [`set_column`](fn.set_column.html) /// * [`set_row`](fn.set_row.html) -pub fn column( +pub fn column( m: &TMat, index: usize, ) -> TVec @@ -27,7 +27,7 @@ where /// * [`column`](fn.column.html) /// * [`row`](fn.row.html) /// * [`set_row`](fn.set_row.html) -pub fn set_column( +pub fn set_column( m: &TMat, index: usize, x: &TVec, @@ -47,7 +47,7 @@ where /// * [`column`](fn.column.html) /// * [`set_column`](fn.set_column.html) /// * [`set_row`](fn.set_row.html) -pub fn row(m: &TMat, index: usize) -> TVec +pub fn row(m: &TMat, index: usize) -> TVec where DefaultAllocator: Alloc { m.row(index).into_owned().transpose() } @@ -59,7 +59,7 @@ where DefaultAllocator: Alloc { /// * [`column`](fn.column.html) /// * [`row`](fn.row.html) /// * [`set_column`](fn.set_column.html) -pub fn set_row( +pub fn set_row( m: &TMat, index: usize, 
x: &TVec, diff --git a/nalgebra-glm/src/gtc/packing.rs b/nalgebra-glm/src/gtc/packing.rs index 1306eca3..5d649538 100644 --- a/nalgebra-glm/src/gtc/packing.rs +++ b/nalgebra-glm/src/gtc/packing.rs @@ -49,7 +49,7 @@ pub fn packInt4x8(v: &I8Vec4) -> i32 { unimplemented!() } -pub fn packRGBM(rgb: &TVec3) -> TVec4 { +pub fn packRGBM(rgb: &TVec3) -> TVec4 { unimplemented!() } @@ -155,7 +155,7 @@ pub fn unpackF3x9_E1x5(p: i32) -> Vec3 { unimplemented!() } -pub fn unpackHalf(p: TVec) -> TVec +pub fn unpackHalf(p: TVec) -> TVec where DefaultAllocator: Alloc { unimplemented!() } @@ -192,7 +192,7 @@ pub fn unpackInt4x8(p: i32) -> I8Vec4 { unimplemented!() } -pub fn unpackRGBM(rgbm: &TVec4) -> TVec3 { +pub fn unpackRGBM(rgbm: &TVec4) -> TVec3 { unimplemented!() } diff --git a/nalgebra-glm/src/gtc/round.rs b/nalgebra-glm/src/gtc/round.rs index d1ca295b..21d3a21e 100644 --- a/nalgebra-glm/src/gtc/round.rs +++ b/nalgebra-glm/src/gtc/round.rs @@ -8,7 +8,7 @@ pub fn ceilMultiple(v: T, Multiple: T) -> T { unimplemented!() } -pub fn ceilMultiple2(v: &TVec, Multiple: &TVec) -> TVec +pub fn ceilMultiple2(v: &TVec, Multiple: &TVec) -> TVec where DefaultAllocator: Alloc { unimplemented!() } @@ -17,7 +17,7 @@ pub fn ceilPowerOfTwo(v: IU) -> IU { unimplemented!() } -pub fn ceilPowerOfTwo2(v: &TVec) -> TVec +pub fn ceilPowerOfTwo2(v: &TVec) -> TVec where DefaultAllocator: Alloc { unimplemented!() } @@ -26,7 +26,7 @@ pub fn floorMultiple(v: T, Multiple: T) -> T { unimplemented!() } -pub fn floorMultiple2(v: &TVec, Multiple: &TVec) -> TVec +pub fn floorMultiple2(v: &TVec, Multiple: &TVec) -> TVec where DefaultAllocator: Alloc { unimplemented!() } @@ -35,7 +35,7 @@ pub fn floorPowerOfTwo(v: IU) -> IU { unimplemented!() } -pub fn floorPowerOfTwo2(v: &TVec) -> TVec +pub fn floorPowerOfTwo2(v: &TVec) -> TVec where DefaultAllocator: Alloc { unimplemented!() } @@ -44,12 +44,12 @@ pub fn isMultiple(v: IU, Multiple: IU) -> bool { unimplemented!() } -pub fn isMultiple2(v: &TVec,Multiple: N) -> TVec 
+pub fn isMultiple2(v: &TVec,Multiple: N) -> TVec where DefaultAllocator: Alloc { unimplemented!() } -pub fn isMultiple3(v: &TVec, Multiple: &TVec) -> TVec +pub fn isMultiple3(v: &TVec, Multiple: &TVec) -> TVec where DefaultAllocator: Alloc { unimplemented!() } @@ -58,7 +58,7 @@ pub fn isPowerOfTwo2(v: IU) -> bool { unimplemented!() } -pub fn isPowerOfTwo(v: &TVec) -> TVec +pub fn isPowerOfTwo(v: &TVec) -> TVec where DefaultAllocator: Alloc { unimplemented!() } @@ -67,7 +67,7 @@ pub fn roundMultiple(v: T, Multiple: T) -> T { unimplemented!() } -pub fn roundMultiple2(v: &TVec, Multiple: &TVec) -> TVec +pub fn roundMultiple2(v: &TVec, Multiple: &TVec) -> TVec where DefaultAllocator: Alloc { unimplemented!() } @@ -76,7 +76,7 @@ pub fn roundPowerOfTwo(v: IU) -> IU { unimplemented!() } -pub fn roundPowerOfTwo2(v: &TVec) -> TVec +pub fn roundPowerOfTwo2(v: &TVec) -> TVec where DefaultAllocator: Alloc { unimplemented!() } diff --git a/nalgebra-glm/src/gtc/type_ptr.rs b/nalgebra-glm/src/gtc/type_ptr.rs index 92309e93..ad9e6f8f 100644 --- a/nalgebra-glm/src/gtc/type_ptr.rs +++ b/nalgebra-glm/src/gtc/type_ptr.rs @@ -7,62 +7,62 @@ use crate::aliases::{ use crate::traits::{Alloc, Dimension, Number}; /// Creates a 2x2 matrix from a slice arranged in column-major order. -pub fn make_mat2(ptr: &[N]) -> TMat2 { +pub fn make_mat2(ptr: &[N]) -> TMat2 { TMat2::from_column_slice(ptr) } /// Creates a 2x2 matrix from a slice arranged in column-major order. -pub fn make_mat2x2(ptr: &[N]) -> TMat2 { +pub fn make_mat2x2(ptr: &[N]) -> TMat2 { TMat2::from_column_slice(ptr) } /// Creates a 2x3 matrix from a slice arranged in column-major order. -pub fn make_mat2x3(ptr: &[N]) -> TMat2x3 { +pub fn make_mat2x3(ptr: &[N]) -> TMat2x3 { TMat2x3::from_column_slice(ptr) } /// Creates a 2x4 matrix from a slice arranged in column-major order. 
-pub fn make_mat2x4(ptr: &[N]) -> TMat2x4 { +pub fn make_mat2x4(ptr: &[N]) -> TMat2x4 { TMat2x4::from_column_slice(ptr) } /// Creates a 3 matrix from a slice arranged in column-major order. -pub fn make_mat3(ptr: &[N]) -> TMat3 { +pub fn make_mat3(ptr: &[N]) -> TMat3 { TMat3::from_column_slice(ptr) } /// Creates a 3x2 matrix from a slice arranged in column-major order. -pub fn make_mat3x2(ptr: &[N]) -> TMat3x2 { +pub fn make_mat3x2(ptr: &[N]) -> TMat3x2 { TMat3x2::from_column_slice(ptr) } /// Creates a 3x3 matrix from a slice arranged in column-major order. -pub fn make_mat3x3(ptr: &[N]) -> TMat3 { +pub fn make_mat3x3(ptr: &[N]) -> TMat3 { TMat3::from_column_slice(ptr) } /// Creates a 3x4 matrix from a slice arranged in column-major order. -pub fn make_mat3x4(ptr: &[N]) -> TMat3x4 { +pub fn make_mat3x4(ptr: &[N]) -> TMat3x4 { TMat3x4::from_column_slice(ptr) } /// Creates a 4x4 matrix from a slice arranged in column-major order. -pub fn make_mat4(ptr: &[N]) -> TMat4 { +pub fn make_mat4(ptr: &[N]) -> TMat4 { TMat4::from_column_slice(ptr) } /// Creates a 4x2 matrix from a slice arranged in column-major order. -pub fn make_mat4x2(ptr: &[N]) -> TMat4x2 { +pub fn make_mat4x2(ptr: &[N]) -> TMat4x2 { TMat4x2::from_column_slice(ptr) } /// Creates a 4x3 matrix from a slice arranged in column-major order. -pub fn make_mat4x3(ptr: &[N]) -> TMat4x3 { +pub fn make_mat4x3(ptr: &[N]) -> TMat4x3 { TMat4x3::from_column_slice(ptr) } /// Creates a 4x4 matrix from a slice arranged in column-major order. -pub fn make_mat4x4(ptr: &[N]) -> TMat4 { +pub fn make_mat4x4(ptr: &[N]) -> TMat4 { TMat4::from_column_slice(ptr) } @@ -75,8 +75,8 @@ pub fn mat2_to_mat3(m: &TMat2) -> TMat3 { } /// Converts a 3x3 matrix to a 2x2 matrix. 
-pub fn mat3_to_mat2(m: &TMat3) -> TMat2 { - TMat2::new(m.m11, m.m12, m.m21, m.m22) +pub fn mat3_to_mat2(m: &TMat3) -> TMat2 { + TMat2::new(m.m11.inlined_clone(), m.m12.inlined_clone(), m.m21.inlined_clone(), m.m22.inlined_clone()) } /// Converts a 3x3 matrix to a 4x4 matrix. @@ -90,9 +90,11 @@ pub fn mat3_to_mat4(m: &TMat3) -> TMat4 { } /// Converts a 4x4 matrix to a 3x3 matrix. -pub fn mat4_to_mat3(m: &TMat4) -> TMat3 { +pub fn mat4_to_mat3(m: &TMat4) -> TMat3 { TMat3::new( - m.m11, m.m12, m.m13, m.m21, m.m22, m.m23, m.m31, m.m32, m.m33, + m.m11.inlined_clone(), m.m12.inlined_clone(), m.m13.inlined_clone(), + m.m21.inlined_clone(), m.m22.inlined_clone(), m.m23.inlined_clone(), + m.m31.inlined_clone(), m.m32.inlined_clone(), m.m33.inlined_clone(), ) } @@ -107,8 +109,8 @@ pub fn mat2_to_mat4(m: &TMat2) -> TMat4 { } /// Converts a 4x4 matrix to a 2x2 matrix. -pub fn mat4_to_mat2(m: &TMat4) -> TMat2 { - TMat2::new(m.m11, m.m12, m.m21, m.m22) +pub fn mat4_to_mat2(m: &TMat4) -> TMat2 { + TMat2::new(m.m11.inlined_clone(), m.m12.inlined_clone(), m.m21.inlined_clone(), m.m22.inlined_clone()) } /// Creates a quaternion from a slice arranged as `[x, y, z, w]`. @@ -123,8 +125,8 @@ pub fn make_quat(ptr: &[N]) -> Qua { /// * [`make_vec2`](fn.make_vec2.html) /// * [`make_vec3`](fn.make_vec3.html) /// * [`make_vec4`](fn.make_vec4.html) -pub fn make_vec1(v: &TVec1) -> TVec1 { - *v +pub fn make_vec1(v: &TVec1) -> TVec1 { + v.clone() } /// Creates a 1D vector from another vector. @@ -137,8 +139,8 @@ pub fn make_vec1(v: &TVec1) -> TVec1 { /// * [`vec1_to_vec2`](fn.vec1_to_vec2.html) /// * [`vec1_to_vec3`](fn.vec1_to_vec3.html) /// * [`vec1_to_vec4`](fn.vec1_to_vec4.html) -pub fn vec2_to_vec1(v: &TVec2) -> TVec1 { - TVec1::new(v.x) +pub fn vec2_to_vec1(v: &TVec2) -> TVec1 { + TVec1::new(v.x.inlined_clone()) } /// Creates a 1D vector from another vector. 
@@ -151,8 +153,8 @@ pub fn vec2_to_vec1(v: &TVec2) -> TVec1 { /// * [`vec1_to_vec2`](fn.vec1_to_vec2.html) /// * [`vec1_to_vec3`](fn.vec1_to_vec3.html) /// * [`vec1_to_vec4`](fn.vec1_to_vec4.html) -pub fn vec3_to_vec1(v: &TVec3) -> TVec1 { - TVec1::new(v.x) +pub fn vec3_to_vec1(v: &TVec3) -> TVec1 { + TVec1::new(v.x.inlined_clone()) } /// Creates a 1D vector from another vector. @@ -165,8 +167,8 @@ pub fn vec3_to_vec1(v: &TVec3) -> TVec1 { /// * [`vec1_to_vec2`](fn.vec1_to_vec2.html) /// * [`vec1_to_vec3`](fn.vec1_to_vec3.html) /// * [`vec1_to_vec4`](fn.vec1_to_vec4.html) -pub fn vec4_to_vec1(v: &TVec4) -> TVec1 { - TVec1::new(v.x) +pub fn vec4_to_vec1(v: &TVec4) -> TVec1 { + TVec1::new(v.x.inlined_clone()) } /// Creates a 2D vector from another vector. @@ -182,7 +184,7 @@ pub fn vec4_to_vec1(v: &TVec4) -> TVec1 { /// * [`vec2_to_vec3`](fn.vec2_to_vec3.html) /// * [`vec2_to_vec4`](fn.vec2_to_vec4.html) pub fn vec1_to_vec2(v: &TVec1) -> TVec2 { - TVec2::new(v.x, N::zero()) + TVec2::new(v.x.inlined_clone(), N::zero()) } /// Creates a 2D vector from another vector. @@ -196,8 +198,8 @@ pub fn vec1_to_vec2(v: &TVec1) -> TVec2 { /// * [`vec2_to_vec2`](fn.vec2_to_vec2.html) /// * [`vec2_to_vec3`](fn.vec2_to_vec3.html) /// * [`vec2_to_vec4`](fn.vec2_to_vec4.html) -pub fn vec2_to_vec2(v: &TVec2) -> TVec2 { - *v +pub fn vec2_to_vec2(v: &TVec2) -> TVec2 { + v.clone() } /// Creates a 2D vector from another vector. @@ -210,8 +212,8 @@ pub fn vec2_to_vec2(v: &TVec2) -> TVec2 { /// * [`vec2_to_vec2`](fn.vec2_to_vec2.html) /// * [`vec2_to_vec3`](fn.vec2_to_vec3.html) /// * [`vec2_to_vec4`](fn.vec2_to_vec4.html) -pub fn vec3_to_vec2(v: &TVec3) -> TVec2 { - TVec2::new(v.x, v.y) +pub fn vec3_to_vec2(v: &TVec3) -> TVec2 { + TVec2::new(v.x.inlined_clone(), v.y.inlined_clone()) } /// Creates a 2D vector from another vector. 
@@ -224,8 +226,8 @@ pub fn vec3_to_vec2(v: &TVec3) -> TVec2 { /// * [`vec2_to_vec2`](fn.vec2_to_vec2.html) /// * [`vec2_to_vec3`](fn.vec2_to_vec3.html) /// * [`vec2_to_vec4`](fn.vec2_to_vec4.html) -pub fn vec4_to_vec2(v: &TVec4) -> TVec2 { - TVec2::new(v.x, v.y) +pub fn vec4_to_vec2(v: &TVec4) -> TVec2 { + TVec2::new(v.x.inlined_clone(), v.y.inlined_clone()) } /// Creates a 2D vector from a slice. @@ -235,7 +237,7 @@ pub fn vec4_to_vec2(v: &TVec4) -> TVec2 { /// * [`make_vec1`](fn.make_vec1.html) /// * [`make_vec3`](fn.make_vec3.html) /// * [`make_vec4`](fn.make_vec4.html) -pub fn make_vec2(ptr: &[N]) -> TVec2 { +pub fn make_vec2(ptr: &[N]) -> TVec2 { TVec2::from_column_slice(ptr) } @@ -251,7 +253,7 @@ pub fn make_vec2(ptr: &[N]) -> TVec2 { /// * [`vec1_to_vec2`](fn.vec1_to_vec2.html) /// * [`vec1_to_vec4`](fn.vec1_to_vec4.html) pub fn vec1_to_vec3(v: &TVec1) -> TVec3 { - TVec3::new(v.x, N::zero(), N::zero()) + TVec3::new(v.x.inlined_clone(), N::zero(), N::zero()) } /// Creates a 3D vector from another vector. @@ -267,7 +269,7 @@ pub fn vec1_to_vec3(v: &TVec1) -> TVec3 { /// * [`vec3_to_vec2`](fn.vec3_to_vec2.html) /// * [`vec3_to_vec4`](fn.vec3_to_vec4.html) pub fn vec2_to_vec3(v: &TVec2) -> TVec3 { - TVec3::new(v.x, v.y, N::zero()) + TVec3::new(v.x.inlined_clone(), v.y.inlined_clone(), N::zero()) } /// Creates a 3D vector from another vector. @@ -280,8 +282,8 @@ pub fn vec2_to_vec3(v: &TVec2) -> TVec3 { /// * [`vec3_to_vec1`](fn.vec3_to_vec1.html) /// * [`vec3_to_vec2`](fn.vec3_to_vec2.html) /// * [`vec3_to_vec4`](fn.vec3_to_vec4.html) -pub fn vec3_to_vec3(v: &TVec3) -> TVec3 { - *v +pub fn vec3_to_vec3(v: &TVec3) -> TVec3 { + v.clone() } /// Creates a 3D vector from another vector. 
@@ -294,8 +296,8 @@ pub fn vec3_to_vec3(v: &TVec3) -> TVec3 { /// * [`vec3_to_vec1`](fn.vec3_to_vec1.html) /// * [`vec3_to_vec2`](fn.vec3_to_vec2.html) /// * [`vec3_to_vec4`](fn.vec3_to_vec4.html) -pub fn vec4_to_vec3(v: &TVec4) -> TVec3 { - TVec3::new(v.x, v.y, v.z) +pub fn vec4_to_vec3(v: &TVec4) -> TVec3 { + TVec3::new(v.x.inlined_clone(), v.y.inlined_clone(), v.z.inlined_clone()) } /// Creates a 3D vector from another vector. @@ -305,7 +307,7 @@ pub fn vec4_to_vec3(v: &TVec4) -> TVec3 { /// * [`make_vec1`](fn.make_vec1.html) /// * [`make_vec2`](fn.make_vec2.html) /// * [`make_vec4`](fn.make_vec4.html) -pub fn make_vec3(ptr: &[N]) -> TVec3 { +pub fn make_vec3(ptr: &[N]) -> TVec3 { TVec3::from_column_slice(ptr) } @@ -367,8 +369,8 @@ pub fn vec3_to_vec4(v: &TVec3) -> TVec4 { /// * [`vec4_to_vec1`](fn.vec4_to_vec1.html) /// * [`vec4_to_vec2`](fn.vec4_to_vec2.html) /// * [`vec4_to_vec3`](fn.vec4_to_vec3.html) -pub fn vec4_to_vec4(v: &TVec4) -> TVec4 { - *v +pub fn vec4_to_vec4(v: &TVec4) -> TVec4 { + v.clone() } /// Creates a 4D vector from another vector. @@ -378,18 +380,18 @@ pub fn vec4_to_vec4(v: &TVec4) -> TVec4 { /// * [`make_vec1`](fn.make_vec1.html) /// * [`make_vec2`](fn.make_vec2.html) /// * [`make_vec3`](fn.make_vec3.html) -pub fn make_vec4(ptr: &[N]) -> TVec4 { +pub fn make_vec4(ptr: &[N]) -> TVec4 { TVec4::from_column_slice(ptr) } /// Converts a matrix or vector to a slice arranged in column-major order. -pub fn value_ptr(x: &TMat) -> &[N] +pub fn value_ptr(x: &TMat) -> &[N] where DefaultAllocator: Alloc { x.as_slice() } /// Converts a matrix or vector to a mutable slice arranged in column-major order. 
-pub fn value_ptr_mut(x: &mut TMat) -> &mut [N] +pub fn value_ptr_mut(x: &mut TMat) -> &mut [N] where DefaultAllocator: Alloc { x.as_mut_slice() } diff --git a/nalgebra-glm/src/gtc/ulp.rs b/nalgebra-glm/src/gtc/ulp.rs index 42ef2d05..07116b15 100644 --- a/nalgebra-glm/src/gtc/ulp.rs +++ b/nalgebra-glm/src/gtc/ulp.rs @@ -7,7 +7,7 @@ pub fn float_distance(x: T, y: T) -> u64 { unimplemented!() } -pub fn float_distance2(x: &TVec2, y: &TVec2) -> TVec { +pub fn float_distance2(x: &TVec2, y: &TVec2) -> TVec { unimplemented!() } diff --git a/nalgebra-glm/src/integer.rs b/nalgebra-glm/src/integer.rs index 3a2641e0..73cae447 100644 --- a/nalgebra-glm/src/integer.rs +++ b/nalgebra-glm/src/integer.rs @@ -7,22 +7,22 @@ pub fn bitCount(v: T) -> i32 { unimplemented!() } -pub fn bitCount2(v: &TVec) -> TVec +pub fn bitCount2(v: &TVec) -> TVec where DefaultAllocator: Alloc { unimplemented!() } -pub fn bitfieldExtract(Value: &TVec, Offset: i32, Bits: i32) -> TVec +pub fn bitfieldExtract(Value: &TVec, Offset: i32, Bits: i32) -> TVec where DefaultAllocator: Alloc { unimplemented!() } -pub fn bitfieldInsert(Base: &TVec, Insert: &TVec, Offset: i32, Bits: i32) -> TVec +pub fn bitfieldInsert(Base: &TVec, Insert: &TVec, Offset: i32, Bits: i32) -> TVec where DefaultAllocator: Alloc { unimplemented!() } -pub fn bitfieldReverse(v: &TVec) -> TVec +pub fn bitfieldReverse(v: &TVec) -> TVec where DefaultAllocator: Alloc { unimplemented!() } @@ -31,7 +31,7 @@ pub fn findLSB(x: IU) -> u32 { unimplemented!() } -pub fn findLSB2(v: &TVec) -> TVec +pub fn findLSB2(v: &TVec) -> TVec where DefaultAllocator: Alloc { unimplemented!() } @@ -40,27 +40,27 @@ pub fn findMSB(x: IU) -> i32 { unimplemented!() } -pub fn findMSB2(v: &TVec) -> TVec +pub fn findMSB2(v: &TVec) -> TVec where DefaultAllocator: Alloc { unimplemented!() } -pub fn imulExtended(x: &TVec, y: &TVec, msb: &TVec, lsb: &TVec) +pub fn imulExtended(x: &TVec, y: &TVec, msb: &TVec, lsb: &TVec) where DefaultAllocator: Alloc { unimplemented!() } -pub 
fn uaddCarry(x: &TVec, y: &TVec, carry: &TVec) -> TVec +pub fn uaddCarry(x: &TVec, y: &TVec, carry: &TVec) -> TVec where DefaultAllocator: Alloc { unimplemented!() } -pub fn umulExtended(x: &TVec, y: &TVec, msb: &TVec, lsb: &TVec) +pub fn umulExtended(x: &TVec, y: &TVec, msb: &TVec, lsb: &TVec) where DefaultAllocator: Alloc { unimplemented!() } -pub fn usubBorrow(x: &TVec, y: &TVec, borrow: &TVec) -> TVec +pub fn usubBorrow(x: &TVec, y: &TVec, borrow: &TVec) -> TVec where DefaultAllocator: Alloc { unimplemented!() } diff --git a/nalgebra-glm/src/matrix.rs b/nalgebra-glm/src/matrix.rs index e83687dc..c9c177b4 100644 --- a/nalgebra-glm/src/matrix.rs +++ b/nalgebra-glm/src/matrix.rs @@ -40,7 +40,7 @@ where } /// The transpose of the matrix `m`. -pub fn transpose(x: &TMat) -> TMat +pub fn transpose(x: &TMat) -> TMat where DefaultAllocator: Alloc { x.transpose() } diff --git a/nalgebra-glm/src/packing.rs b/nalgebra-glm/src/packing.rs index 641ad159..e0169cbe 100644 --- a/nalgebra-glm/src/packing.rs +++ b/nalgebra-glm/src/packing.rs @@ -3,50 +3,50 @@ use na::Scalar; use crate::aliases::{Vec2, Vec4, UVec2}; -pub fn packDouble2x32(v: &UVec2) -> f64 { +pub fn packDouble2x32(v: &UVec2) -> f64 { unimplemented!() } -pub fn packHalf2x16(v: &Vec2) -> u32 { +pub fn packHalf2x16(v: &Vec2) -> u32 { unimplemented!() } -pub fn packSnorm2x16(v: &Vec2) -> u32 { +pub fn packSnorm2x16(v: &Vec2) -> u32 { unimplemented!() } -pub fn packSnorm4x8(v: &Vec4) -> u32 { +pub fn packSnorm4x8(v: &Vec4) -> u32 { unimplemented!() } -pub fn packUnorm2x16(v: &Vec2) -> u32 { +pub fn packUnorm2x16(v: &Vec2) -> u32 { unimplemented!() } -pub fn packUnorm4x8(v: &Vec4) -> u32 { +pub fn packUnorm4x8(v: &Vec4) -> u32 { unimplemented!() } -pub fn unpackDouble2x32(v: f64) -> UVec2 { +pub fn unpackDouble2x32(v: f64) -> UVec2 { unimplemented!() } -pub fn unpackHalf2x16(v: u32) -> Vec2 { +pub fn unpackHalf2x16(v: u32) -> Vec2 { unimplemented!() } -pub fn unpackSnorm2x16(p: u32) -> Vec2 { +pub fn unpackSnorm2x16(p: 
u32) -> Vec2 { unimplemented!() } -pub fn unpackSnorm4x8(p: u32) -> Vec4 { +pub fn unpackSnorm4x8(p: u32) -> Vec4 { unimplemented!() } -pub fn unpackUnorm2x16(p: u32) -> Vec2 { +pub fn unpackUnorm2x16(p: u32) -> Vec2 { unimplemented!() } -pub fn unpackUnorm4x8(p: u32) -> Vec4 { +pub fn unpackUnorm4x8(p: u32) -> Vec4 { unimplemented!() } diff --git a/nalgebra-glm/src/traits.rs b/nalgebra-glm/src/traits.rs index ac3aa667..15efb72b 100644 --- a/nalgebra-glm/src/traits.rs +++ b/nalgebra-glm/src/traits.rs @@ -20,7 +20,7 @@ impl + Signed + Fr {} #[doc(hidden)] -pub trait Alloc: +pub trait Alloc: Allocator + Allocator + Allocator @@ -50,7 +50,7 @@ pub trait Alloc: { } -impl Alloc for T where T: Allocator +impl Alloc for T where T: Allocator + Allocator + Allocator + Allocator diff --git a/nalgebra-lapack/src/cholesky.rs b/nalgebra-lapack/src/cholesky.rs index 2014da76..7dce3a13 100644 --- a/nalgebra-lapack/src/cholesky.rs +++ b/nalgebra-lapack/src/cholesky.rs @@ -28,7 +28,7 @@ use lapack; )) )] #[derive(Clone, Debug)] -pub struct Cholesky +pub struct Cholesky where DefaultAllocator: Allocator { l: MatrixN, diff --git a/nalgebra-lapack/src/eigen.rs b/nalgebra-lapack/src/eigen.rs index 0db48fc1..c645e228 100644 --- a/nalgebra-lapack/src/eigen.rs +++ b/nalgebra-lapack/src/eigen.rs @@ -33,7 +33,7 @@ use lapack; )) )] #[derive(Clone, Debug)] -pub struct Eigen +pub struct Eigen where DefaultAllocator: Allocator + Allocator { /// The eigenvalues of the decomposed matrix. @@ -311,7 +311,7 @@ where DefaultAllocator: Allocator + Allocator */ /// Trait implemented by scalar type for which Lapack function exist to compute the /// eigendecomposition. 
-pub trait EigenScalar: Scalar + Copy { +pub trait EigenScalar: Scalar + Clone { #[allow(missing_docs)] fn xgeev( jobvl: u8, diff --git a/nalgebra-lapack/src/hessenberg.rs b/nalgebra-lapack/src/hessenberg.rs index 8048bfcc..92ec6cda 100644 --- a/nalgebra-lapack/src/hessenberg.rs +++ b/nalgebra-lapack/src/hessenberg.rs @@ -30,7 +30,7 @@ use lapack; )) )] #[derive(Clone, Debug)] -pub struct Hessenberg> +pub struct Hessenberg> where DefaultAllocator: Allocator + Allocator> { h: MatrixN, diff --git a/nalgebra-lapack/src/lu.rs b/nalgebra-lapack/src/lu.rs index b087aea7..a636a722 100644 --- a/nalgebra-lapack/src/lu.rs +++ b/nalgebra-lapack/src/lu.rs @@ -37,7 +37,7 @@ use lapack; )) )] #[derive(Clone, Debug)] -pub struct LU, C: Dim> +pub struct LU, C: Dim> where DefaultAllocator: Allocator> + Allocator { lu: MatrixMN, diff --git a/nalgebra-lapack/src/qr.rs b/nalgebra-lapack/src/qr.rs index 58a040d5..7aa10cb5 100644 --- a/nalgebra-lapack/src/qr.rs +++ b/nalgebra-lapack/src/qr.rs @@ -33,7 +33,7 @@ use lapack; )) )] #[derive(Clone, Debug)] -pub struct QR, C: Dim> +pub struct QR, C: Dim> where DefaultAllocator: Allocator + Allocator> { qr: MatrixMN, diff --git a/nalgebra-lapack/src/schur.rs b/nalgebra-lapack/src/schur.rs index e61a22ea..69d0a29a 100644 --- a/nalgebra-lapack/src/schur.rs +++ b/nalgebra-lapack/src/schur.rs @@ -33,7 +33,7 @@ use lapack; )) )] #[derive(Clone, Debug)] -pub struct Schur +pub struct Schur where DefaultAllocator: Allocator + Allocator { re: VectorN, @@ -162,7 +162,7 @@ where DefaultAllocator: Allocator + Allocator * */ /// Trait implemented by scalars for which Lapack implements the RealField Schur decomposition. 
-pub trait SchurScalar: Scalar + Copy { +pub trait SchurScalar: Scalar + Clone { #[allow(missing_docs)] fn xgees( jobvs: u8, diff --git a/nalgebra-lapack/src/svd.rs b/nalgebra-lapack/src/svd.rs index 1cd0d24b..ac77fba1 100644 --- a/nalgebra-lapack/src/svd.rs +++ b/nalgebra-lapack/src/svd.rs @@ -36,7 +36,7 @@ use lapack; )) )] #[derive(Clone, Debug)] -pub struct SVD, C: Dim> +pub struct SVD, C: Dim> where DefaultAllocator: Allocator + Allocator> + Allocator { /// The left-singular vectors `U` of this SVD. @@ -57,7 +57,7 @@ where /// Trait implemented by floats (`f32`, `f64`) and complex floats (`Complex`, `Complex`) /// supported by the Singular Value Decompotition. -pub trait SVDScalar, C: Dim>: Scalar + Copy +pub trait SVDScalar, C: Dim>: Scalar + Clone where DefaultAllocator: Allocator + Allocator + Allocator> diff --git a/nalgebra-lapack/src/symmetric_eigen.rs b/nalgebra-lapack/src/symmetric_eigen.rs index d50ee805..eccdf0f7 100644 --- a/nalgebra-lapack/src/symmetric_eigen.rs +++ b/nalgebra-lapack/src/symmetric_eigen.rs @@ -35,7 +35,7 @@ use lapack; )) )] #[derive(Clone, Debug)] -pub struct SymmetricEigen +pub struct SymmetricEigen where DefaultAllocator: Allocator + Allocator { /// The eigenvectors of the decomposed matrix. @@ -169,7 +169,7 @@ where DefaultAllocator: Allocator + Allocator */ /// Trait implemented by scalars for which Lapack implements the eigendecomposition of symmetric /// real matrices. -pub trait SymmetricEigenScalar: Scalar + Copy { +pub trait SymmetricEigenScalar: Scalar + Clone { #[allow(missing_docs)] fn xsyev( jobz: u8, diff --git a/src/base/allocator.rs b/src/base/allocator.rs index 246f3620..3625b059 100644 --- a/src/base/allocator.rs +++ b/src/base/allocator.rs @@ -16,7 +16,7 @@ use crate::base::{DefaultAllocator, Scalar}; /// /// Every allocator must be both static and dynamic. Though not all implementations may share the /// same `Buffer` type. 
-pub trait Allocator: Any + Sized { +pub trait Allocator: Any + Sized { /// The type of buffer this allocator can instanciate. type Buffer: ContiguousStorageMut + Clone; @@ -33,7 +33,7 @@ pub trait Allocator: Any + Sized { /// A matrix reallocator. Changes the size of the memory buffer that initially contains (RFrom × /// CFrom) elements to a smaller or larger size (RTo, CTo). -pub trait Reallocator: +pub trait Reallocator: Allocator + Allocator { /// Reallocates a buffer of shape `(RTo, CTo)`, possibly reusing a previously allocated buffer @@ -65,7 +65,7 @@ where R2: Dim, C1: Dim, C2: Dim, - N: Scalar + Copy, + N: Scalar + Clone, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { } @@ -76,7 +76,7 @@ where R2: Dim, C1: Dim, C2: Dim, - N: Scalar + Copy, + N: Scalar + Clone, DefaultAllocator: Allocator + Allocator, SameShapeC>, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, {} @@ -88,7 +88,7 @@ pub trait SameShapeVectorAllocator: where R1: Dim, R2: Dim, - N: Scalar + Copy, + N: Scalar + Clone, ShapeConstraint: SameNumberOfRows, { } @@ -97,7 +97,7 @@ impl SameShapeVectorAllocator for DefaultAllocator where R1: Dim, R2: Dim, - N: Scalar + Copy, + N: Scalar + Clone, DefaultAllocator: Allocator + Allocator>, ShapeConstraint: SameNumberOfRows, {} diff --git a/src/base/array_storage.rs b/src/base/array_storage.rs index 17247356..ccddcce6 100644 --- a/src/base/array_storage.rs +++ b/src/base/array_storage.rs @@ -154,7 +154,7 @@ where unsafe impl Storage for ArrayStorage where - N: Scalar + Copy, + N: Scalar + Clone, R: DimName, C: DimName, R::Value: Mul, @@ -206,7 +206,7 @@ where unsafe impl StorageMut for ArrayStorage where - N: Scalar + Copy, + N: Scalar + Clone, R: DimName, C: DimName, R::Value: Mul, @@ -226,7 +226,7 @@ where unsafe impl ContiguousStorage for ArrayStorage where - N: Scalar + Copy, + N: Scalar + Clone, R: DimName, C: DimName, R::Value: Mul, @@ -236,7 +236,7 @@ where unsafe impl ContiguousStorageMut for ArrayStorage where - N: Scalar + 
Copy, + N: Scalar + Clone, R: DimName, C: DimName, R::Value: Mul, @@ -253,7 +253,7 @@ where #[cfg(feature = "serde-serialize")] impl Serialize for ArrayStorage where - N: Scalar + Copy + Serialize, + N: Scalar + Clone + Serialize, R: DimName, C: DimName, R::Value: Mul, @@ -274,7 +274,7 @@ where #[cfg(feature = "serde-serialize")] impl<'a, N, R, C> Deserialize<'a> for ArrayStorage where - N: Scalar + Copy + Deserialize<'a>, + N: Scalar + Clone + Deserialize<'a>, R: DimName, C: DimName, R::Value: Mul, @@ -295,7 +295,7 @@ struct ArrayStorageVisitor { #[cfg(feature = "serde-serialize")] impl ArrayStorageVisitor where - N: Scalar + Copy, + N: Scalar + Clone, R: DimName, C: DimName, R::Value: Mul, @@ -312,7 +312,7 @@ where #[cfg(feature = "serde-serialize")] impl<'a, N, R, C> Visitor<'a> for ArrayStorageVisitor where - N: Scalar + Copy + Deserialize<'a>, + N: Scalar + Clone + Deserialize<'a>, R: DimName, C: DimName, R::Value: Mul, diff --git a/src/base/blas.rs b/src/base/blas.rs index a999b6eb..5cdc52dc 100644 --- a/src/base/blas.rs +++ b/src/base/blas.rs @@ -48,7 +48,7 @@ impl> Vector { } } -impl> Vector { +impl> Vector { /// Computes the index and value of the vector component with the largest value. /// /// # Examples: @@ -74,7 +74,7 @@ impl> Vector { } } - (the_i, *the_max) + (the_i, the_max.inlined_clone()) } /// Computes the index of the vector component with the largest value. @@ -145,7 +145,7 @@ impl> Vector { } } - (the_i, *the_min) + (the_i, the_min.inlined_clone()) } /// Computes the index of the vector component with the smallest value. @@ -230,7 +230,7 @@ impl> Matrix { } -impl> Matrix { +impl> Matrix { /// Computes the index of the matrix component with the largest absolute value. 
/// /// # Examples: @@ -264,7 +264,7 @@ impl } impl> Matrix -where N: Scalar + Copy + Zero + ClosedAdd + ClosedMul +where N: Scalar + Clone + Zero + ClosedAdd + ClosedMul { #[inline(always)] fn dotx(&self, rhs: &Matrix, conjugate: impl Fn(N) -> N) -> N @@ -281,27 +281,27 @@ where N: Scalar + Copy + Zero + ClosedAdd + ClosedMul // because the `for` loop below won't be very efficient on those. if (R::is::() || R2::is::()) && (C::is::() || C2::is::()) { unsafe { - let a = conjugate(*self.get_unchecked((0, 0))) * *rhs.get_unchecked((0, 0)); - let b = conjugate(*self.get_unchecked((1, 0))) * *rhs.get_unchecked((1, 0)); + let a = conjugate(self.get_unchecked((0, 0)).inlined_clone()) * rhs.get_unchecked((0, 0)).inlined_clone(); + let b = conjugate(self.get_unchecked((1, 0)).inlined_clone()) * rhs.get_unchecked((1, 0)).inlined_clone(); return a + b; } } if (R::is::() || R2::is::()) && (C::is::() || C2::is::()) { unsafe { - let a = conjugate(*self.get_unchecked((0, 0))) * *rhs.get_unchecked((0, 0)); - let b = conjugate(*self.get_unchecked((1, 0))) * *rhs.get_unchecked((1, 0)); - let c = conjugate(*self.get_unchecked((2, 0))) * *rhs.get_unchecked((2, 0)); + let a = conjugate(self.get_unchecked((0, 0)).inlined_clone()) * rhs.get_unchecked((0, 0)).inlined_clone(); + let b = conjugate(self.get_unchecked((1, 0)).inlined_clone()) * rhs.get_unchecked((1, 0)).inlined_clone(); + let c = conjugate(self.get_unchecked((2, 0)).inlined_clone()) * rhs.get_unchecked((2, 0)).inlined_clone(); return a + b + c; } } if (R::is::() || R2::is::()) && (C::is::() || C2::is::()) { unsafe { - let mut a = conjugate(*self.get_unchecked((0, 0))) * *rhs.get_unchecked((0, 0)); - let mut b = conjugate(*self.get_unchecked((1, 0))) * *rhs.get_unchecked((1, 0)); - let c = conjugate(*self.get_unchecked((2, 0))) * *rhs.get_unchecked((2, 0)); - let d = conjugate(*self.get_unchecked((3, 0))) * *rhs.get_unchecked((3, 0)); + let mut a = conjugate(self.get_unchecked((0, 0)).inlined_clone()) * rhs.get_unchecked((0, 
0)).inlined_clone(); + let mut b = conjugate(self.get_unchecked((1, 0)).inlined_clone()) * rhs.get_unchecked((1, 0)).inlined_clone(); + let c = conjugate(self.get_unchecked((2, 0)).inlined_clone()) * rhs.get_unchecked((2, 0)).inlined_clone(); + let d = conjugate(self.get_unchecked((3, 0)).inlined_clone()) * rhs.get_unchecked((3, 0)).inlined_clone(); a += c; b += d; @@ -341,14 +341,14 @@ where N: Scalar + Copy + Zero + ClosedAdd + ClosedMul acc7 = N::zero(); while self.nrows() - i >= 8 { - acc0 += unsafe { conjugate(*self.get_unchecked((i + 0, j))) * *rhs.get_unchecked((i + 0, j)) }; - acc1 += unsafe { conjugate(*self.get_unchecked((i + 1, j))) * *rhs.get_unchecked((i + 1, j)) }; - acc2 += unsafe { conjugate(*self.get_unchecked((i + 2, j))) * *rhs.get_unchecked((i + 2, j)) }; - acc3 += unsafe { conjugate(*self.get_unchecked((i + 3, j))) * *rhs.get_unchecked((i + 3, j)) }; - acc4 += unsafe { conjugate(*self.get_unchecked((i + 4, j))) * *rhs.get_unchecked((i + 4, j)) }; - acc5 += unsafe { conjugate(*self.get_unchecked((i + 5, j))) * *rhs.get_unchecked((i + 5, j)) }; - acc6 += unsafe { conjugate(*self.get_unchecked((i + 6, j))) * *rhs.get_unchecked((i + 6, j)) }; - acc7 += unsafe { conjugate(*self.get_unchecked((i + 7, j))) * *rhs.get_unchecked((i + 7, j)) }; + acc0 += unsafe { conjugate(self.get_unchecked((i + 0, j)).inlined_clone()) * rhs.get_unchecked((i + 0, j)).inlined_clone() }; + acc1 += unsafe { conjugate(self.get_unchecked((i + 1, j)).inlined_clone()) * rhs.get_unchecked((i + 1, j)).inlined_clone() }; + acc2 += unsafe { conjugate(self.get_unchecked((i + 2, j)).inlined_clone()) * rhs.get_unchecked((i + 2, j)).inlined_clone() }; + acc3 += unsafe { conjugate(self.get_unchecked((i + 3, j)).inlined_clone()) * rhs.get_unchecked((i + 3, j)).inlined_clone() }; + acc4 += unsafe { conjugate(self.get_unchecked((i + 4, j)).inlined_clone()) * rhs.get_unchecked((i + 4, j)).inlined_clone() }; + acc5 += unsafe { conjugate(self.get_unchecked((i + 5, j)).inlined_clone()) * 
rhs.get_unchecked((i + 5, j)).inlined_clone() }; + acc6 += unsafe { conjugate(self.get_unchecked((i + 6, j)).inlined_clone()) * rhs.get_unchecked((i + 6, j)).inlined_clone() }; + acc7 += unsafe { conjugate(self.get_unchecked((i + 7, j)).inlined_clone()) * rhs.get_unchecked((i + 7, j)).inlined_clone() }; i += 8; } @@ -358,7 +358,7 @@ where N: Scalar + Copy + Zero + ClosedAdd + ClosedMul res += acc3 + acc7; for k in i..self.nrows() { - res += unsafe { conjugate(*self.get_unchecked((k, j))) * *rhs.get_unchecked((k, j)) } + res += unsafe { conjugate(self.get_unchecked((k, j)).inlined_clone()) * rhs.get_unchecked((k, j)).inlined_clone() } } } @@ -460,7 +460,7 @@ where N: Scalar + Copy + Zero + ClosedAdd + ClosedMul for j in 0..self.nrows() { for i in 0..self.ncols() { - res += unsafe { *self.get_unchecked((j, i)) * *rhs.get_unchecked((i, j)) } + res += unsafe { self.get_unchecked((j, i)).inlined_clone() * rhs.get_unchecked((i, j)).inlined_clone() } } } @@ -469,27 +469,27 @@ where N: Scalar + Copy + Zero + ClosedAdd + ClosedMul } fn array_axcpy(y: &mut [N], a: N, x: &[N], c: N, beta: N, stride1: usize, stride2: usize, len: usize) -where N: Scalar + Copy + Zero + ClosedAdd + ClosedMul { +where N: Scalar + Clone + Zero + ClosedAdd + ClosedMul { for i in 0..len { unsafe { let y = y.get_unchecked_mut(i * stride1); - *y = a * *x.get_unchecked(i * stride2) * c + beta * *y; + *y = a.inlined_clone() * x.get_unchecked(i * stride2).inlined_clone() * c.inlined_clone() + beta.inlined_clone() * y.inlined_clone(); } } } fn array_axc(y: &mut [N], a: N, x: &[N], c: N, stride1: usize, stride2: usize, len: usize) -where N: Scalar + Copy + Zero + ClosedAdd + ClosedMul { +where N: Scalar + Clone + Zero + ClosedAdd + ClosedMul { for i in 0..len { unsafe { - *y.get_unchecked_mut(i * stride1) = a * *x.get_unchecked(i * stride2) * c; + *y.get_unchecked_mut(i * stride1) = a.inlined_clone() * x.get_unchecked(i * stride2).inlined_clone() * c.inlined_clone(); } } } impl Vector where - N: Scalar + 
Copy + Zero + ClosedAdd + ClosedMul, + N: Scalar + Clone + Zero + ClosedAdd + ClosedMul, S: StorageMut, { /// Computes `self = a * x * c + b * self`. @@ -602,14 +602,14 @@ where // FIXME: avoid bound checks. let col2 = a.column(0); - let val = unsafe { *x.vget_unchecked(0) }; - self.axcpy(alpha, &col2, val, beta); + let val = unsafe { x.vget_unchecked(0).inlined_clone() }; + self.axcpy(alpha.inlined_clone(), &col2, val, beta); for j in 1..ncols2 { let col2 = a.column(j); - let val = unsafe { *x.vget_unchecked(j) }; + let val = unsafe { x.vget_unchecked(j).inlined_clone() }; - self.axcpy(alpha, &col2, val, N::one()); + self.axcpy(alpha.inlined_clone(), &col2, val, N::one()); } } @@ -647,9 +647,9 @@ where // FIXME: avoid bound checks. let col2 = a.column(0); - let val = unsafe { *x.vget_unchecked(0) }; - self.axpy(alpha * val, &col2, beta); - self[0] += alpha * dot(&a.slice_range(1.., 0), &x.rows_range(1..)); + let val = unsafe { x.vget_unchecked(0).inlined_clone() }; + self.axpy(alpha.inlined_clone() * val, &col2, beta); + self[0] += alpha.inlined_clone() * dot(&a.slice_range(1.., 0), &x.rows_range(1..)); for j in 1..dim2 { let col2 = a.column(j); @@ -657,11 +657,11 @@ where let val; unsafe { - val = *x.vget_unchecked(j); - *self.vget_unchecked_mut(j) += alpha * dot; + val = x.vget_unchecked(j).inlined_clone(); + *self.vget_unchecked_mut(j) += alpha.inlined_clone() * dot; } self.rows_range_mut(j + 1..) 
- .axpy(alpha * val, &col2.rows_range(j + 1..), N::one()); + .axpy(alpha.inlined_clone() * val, &col2.rows_range(j + 1..), N::one()); } } @@ -804,12 +804,12 @@ where if beta.is_zero() { for j in 0..ncols2 { let val = unsafe { self.vget_unchecked_mut(j) }; - *val = alpha * dot(&a.column(j), x) + *val = alpha.inlined_clone() * dot(&a.column(j), x) } } else { for j in 0..ncols2 { let val = unsafe { self.vget_unchecked_mut(j) }; - *val = alpha * dot(&a.column(j), x) + beta * *val; + *val = alpha.inlined_clone() * dot(&a.column(j), x) + beta.inlined_clone() * val.inlined_clone(); } } } @@ -886,7 +886,7 @@ where } impl> Matrix -where N: Scalar + Copy + Zero + ClosedAdd + ClosedMul +where N: Scalar + Clone + Zero + ClosedAdd + ClosedMul { #[inline(always)] fn gerx( @@ -913,8 +913,8 @@ where N: Scalar + Copy + Zero + ClosedAdd + ClosedMul for j in 0..ncols1 { // FIXME: avoid bound checks. - let val = unsafe { conjugate(*y.vget_unchecked(j)) }; - self.column_mut(j).axpy(alpha * val, x, beta); + let val = unsafe { conjugate(y.vget_unchecked(j).inlined_clone()) }; + self.column_mut(j).axpy(alpha.inlined_clone() * val, x, beta.inlined_clone()); } } @@ -1128,7 +1128,7 @@ where N: Scalar + Copy + Zero + ClosedAdd + ClosedMul for j1 in 0..ncols1 { // FIXME: avoid bound checks. - self.column_mut(j1).gemv(alpha, a, &b.column(j1), beta); + self.column_mut(j1).gemv(alpha.inlined_clone(), a, &b.column(j1), beta.inlined_clone()); } } @@ -1185,7 +1185,7 @@ where N: Scalar + Copy + Zero + ClosedAdd + ClosedMul for j1 in 0..ncols1 { // FIXME: avoid bound checks. 
- self.column_mut(j1).gemv_tr(alpha, a, &b.column(j1), beta); + self.column_mut(j1).gemv_tr(alpha.inlined_clone(), a, &b.column(j1), beta.inlined_clone()); } } @@ -1249,7 +1249,7 @@ where N: Scalar + Copy + Zero + ClosedAdd + ClosedMul } impl> Matrix -where N: Scalar + Copy + Zero + ClosedAdd + ClosedMul +where N: Scalar + Clone + Zero + ClosedAdd + ClosedMul { #[inline(always)] fn xxgerx( @@ -1276,13 +1276,13 @@ where N: Scalar + Copy + Zero + ClosedAdd + ClosedMul assert!(dim1 == dim2 && dim1 == dim3, "ger: dimensions mismatch."); for j in 0..dim1 { - let val = unsafe { conjugate(*y.vget_unchecked(j)) }; + let val = unsafe { conjugate(y.vget_unchecked(j).inlined_clone()) }; let subdim = Dynamic::new(dim1 - j); // FIXME: avoid bound checks. self.generic_slice_mut((j, j), (subdim, U1)).axpy( - alpha * val, + alpha.inlined_clone() * val, &x.rows_range(j..), - beta, + beta.inlined_clone(), ); } } @@ -1396,7 +1396,7 @@ where N: Scalar + Copy + Zero + ClosedAdd + ClosedMul } impl> SquareMatrix -where N: Scalar + Copy + Zero + One + ClosedAdd + ClosedMul +where N: Scalar + Clone + Zero + One + ClosedAdd + ClosedMul { /// Computes the quadratic form `self = alpha * lhs * mid * lhs.transpose() + beta * self`. 
/// @@ -1442,11 +1442,11 @@ where N: Scalar + Copy + Zero + One + ClosedAdd + ClosedMul ShapeConstraint: DimEq + DimEq + DimEq + DimEq, { work.gemv(N::one(), lhs, &mid.column(0), N::zero()); - self.ger(alpha, work, &lhs.column(0), beta); + self.ger(alpha.inlined_clone(), work, &lhs.column(0), beta); for j in 1..mid.ncols() { work.gemv(N::one(), lhs, &mid.column(j), N::zero()); - self.ger(alpha, work, &lhs.column(j), N::one()); + self.ger(alpha.inlined_clone(), work, &lhs.column(j), N::one()); } } @@ -1534,11 +1534,11 @@ where N: Scalar + Copy + Zero + One + ClosedAdd + ClosedMul DimEq + DimEq + DimEq + AreMultipliable, { work.gemv(N::one(), mid, &rhs.column(0), N::zero()); - self.column_mut(0).gemv_tr(alpha, &rhs, work, beta); + self.column_mut(0).gemv_tr(alpha.inlined_clone(), &rhs, work, beta.inlined_clone()); for j in 1..rhs.ncols() { work.gemv(N::one(), mid, &rhs.column(j), N::zero()); - self.column_mut(j).gemv_tr(alpha, &rhs, work, beta); + self.column_mut(j).gemv_tr(alpha.inlined_clone(), &rhs, work, beta.inlined_clone()); } } diff --git a/src/base/cg.rs b/src/base/cg.rs index 5908c111..262ffecf 100644 --- a/src/base/cg.rs +++ b/src/base/cg.rs @@ -23,7 +23,7 @@ use alga::linear::Transformation; impl MatrixN where - N: Scalar + Copy + Ring, + N: Scalar + Clone + Ring, DefaultAllocator: Allocator, { /// Creates a new homogeneous matrix that applies the same scaling factor on each dimension. @@ -44,7 +44,7 @@ where { let mut res = Self::one(); for i in 0..scaling.len() { - res[(i, i)] = scaling[i]; + res[(i, i)] = scaling[i].inlined_clone(); } res @@ -153,7 +153,7 @@ impl Matrix4 { } } -impl> SquareMatrix { +impl> SquareMatrix { /// Computes the transformation equal to `self` followed by an uniform scaling factor. #[inline] pub fn append_scaling(&self, scaling: N) -> MatrixN @@ -240,7 +240,7 @@ impl> SquareMatrix> SquareMatrix { +impl> SquareMatrix { /// Computes in-place the transformation equal to `self` followed by an uniform scaling factor. 
#[inline] pub fn append_scaling_mut(&mut self, scaling: N) @@ -266,7 +266,7 @@ impl> SquareMatrix(i); - to_scale *= scaling[i]; + to_scale *= scaling[i].inlined_clone(); } } @@ -281,7 +281,7 @@ impl> SquareMatrix(i); - to_scale *= scaling[i]; + to_scale *= scaling[i].inlined_clone(); } } @@ -294,7 +294,7 @@ impl> SquareMatrix = MatrixSum; -impl> Matrix { +impl> Matrix { /// Computes the component-wise absolute value. /// /// # Example @@ -45,7 +45,7 @@ impl> Matrix { macro_rules! component_binop_impl( ($($binop: ident, $binop_mut: ident, $binop_assign: ident, $cmpy: ident, $Trait: ident . $op: ident . $op_assign: ident, $desc:expr, $desc_cmpy:expr, $desc_mut:expr);* $(;)*) => {$( - impl> Matrix { + impl> Matrix { #[doc = $desc] #[inline] pub fn $binop(&self, rhs: &Matrix) -> MatrixComponentOp @@ -61,7 +61,7 @@ macro_rules! component_binop_impl( for j in 0 .. res.ncols() { for i in 0 .. res.nrows() { unsafe { - res.get_unchecked_mut((i, j)).$op_assign(*rhs.get_unchecked((i, j))); + res.get_unchecked_mut((i, j)).$op_assign(rhs.get_unchecked((i, j)).inlined_clone()); } } } @@ -70,7 +70,7 @@ macro_rules! component_binop_impl( } } - impl> Matrix { + impl> Matrix { // componentwise binop plus Y. #[doc = $desc_cmpy] #[inline] @@ -89,7 +89,7 @@ macro_rules! component_binop_impl( for j in 0 .. self.ncols() { for i in 0 .. self.nrows() { unsafe { - let res = alpha * a.get_unchecked((i, j)).$op(*b.get_unchecked((i, j))); + let res = alpha.inlined_clone() * a.get_unchecked((i, j)).inlined_clone().$op(b.get_unchecked((i, j)).inlined_clone()); *self.get_unchecked_mut((i, j)) = res; } } @@ -99,8 +99,8 @@ macro_rules! component_binop_impl( for j in 0 .. self.ncols() { for i in 0 .. 
self.nrows() { unsafe { - let res = alpha * a.get_unchecked((i, j)).$op(*b.get_unchecked((i, j))); - *self.get_unchecked_mut((i, j)) = beta * *self.get_unchecked((i, j)) + res; + let res = alpha.inlined_clone() * a.get_unchecked((i, j)).inlined_clone().$op(b.get_unchecked((i, j)).inlined_clone()); + *self.get_unchecked_mut((i, j)) = beta.inlined_clone() * self.get_unchecked((i, j)).inlined_clone() + res; } } } @@ -121,7 +121,7 @@ macro_rules! component_binop_impl( for j in 0 .. self.ncols() { for i in 0 .. self.nrows() { unsafe { - self.get_unchecked_mut((i, j)).$op_assign(*rhs.get_unchecked((i, j))); + self.get_unchecked_mut((i, j)).$op_assign(rhs.get_unchecked((i, j)).inlined_clone()); } } } diff --git a/src/base/construction.rs b/src/base/construction.rs index d7f1fef8..6925351d 100644 --- a/src/base/construction.rs +++ b/src/base/construction.rs @@ -27,7 +27,7 @@ use crate::base::{DefaultAllocator, Matrix, MatrixMN, MatrixN, Scalar, Unit, Vec * Generic constructors. * */ -impl MatrixMN +impl MatrixMN where DefaultAllocator: Allocator { /// Creates a new uninitialized matrix. If the matrix has a compile-time dimension, this panics @@ -84,7 +84,7 @@ where DefaultAllocator: Allocator for i in 0..nrows.value() { for j in 0..ncols.value() { - unsafe { *res.get_unchecked_mut((i, j)) = *iter.next().unwrap() } + unsafe { *res.get_unchecked_mut((i, j)) = iter.next().unwrap().inlined_clone() } } } @@ -134,7 +134,7 @@ where DefaultAllocator: Allocator let mut res = Self::zeros_generic(nrows, ncols); for i in 0..crate::min(nrows.value(), ncols.value()) { - unsafe { *res.get_unchecked_mut((i, i)) = elt } + unsafe { *res.get_unchecked_mut((i, i)) = elt.inlined_clone() } } res @@ -154,7 +154,7 @@ where DefaultAllocator: Allocator ); for (i, elt) in elts.iter().enumerate() { - unsafe { *res.get_unchecked_mut((i, i)) = *elt } + unsafe { *res.get_unchecked_mut((i, i)) = elt.inlined_clone() } } res @@ -196,7 +196,7 @@ where DefaultAllocator: Allocator // FIXME: optimize that. 
Self::from_fn_generic(R::from_usize(nrows), C::from_usize(ncols), |i, j| { - rows[i][(0, j)] + rows[i][(0, j)].inlined_clone() }) } @@ -236,7 +236,7 @@ where DefaultAllocator: Allocator // FIXME: optimize that. Self::from_fn_generic(R::from_usize(nrows), C::from_usize(ncols), |i, j| { - columns[j][i] + columns[j][i].inlined_clone() }) } @@ -286,7 +286,7 @@ where DefaultAllocator: Allocator impl MatrixN where - N: Scalar + Copy, + N: Scalar + Clone, DefaultAllocator: Allocator, { /// Creates a square matrix with its diagonal set to `diag` and all other entries set to 0. @@ -315,7 +315,7 @@ where for i in 0..diag.len() { unsafe { - *res.get_unchecked_mut((i, i)) = *diag.vget_unchecked(i); + *res.get_unchecked_mut((i, i)) = diag.vget_unchecked(i).inlined_clone(); } } @@ -330,7 +330,7 @@ where */ macro_rules! impl_constructors( ($($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => { - impl MatrixMN + impl MatrixMN where DefaultAllocator: Allocator { /// Creates a new uninitialized matrix or vector. @@ -559,7 +559,7 @@ macro_rules! impl_constructors( } } - impl MatrixMN + impl MatrixMN where DefaultAllocator: Allocator, Standard: Distribution { @@ -603,7 +603,7 @@ impl_constructors!(Dynamic, Dynamic; */ macro_rules! impl_constructors_from_data( ($data: ident; $($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => { - impl MatrixMN + impl MatrixMN where DefaultAllocator: Allocator { /// Creates a matrix with its elements filled with the components provided by a slice /// in row-major order. 
@@ -721,7 +721,7 @@ impl_constructors_from_data!(data; Dynamic, Dynamic; */ impl Zero for MatrixMN where - N: Scalar + Copy + Zero + ClosedAdd, + N: Scalar + Clone + Zero + ClosedAdd, DefaultAllocator: Allocator, { #[inline] @@ -737,7 +737,7 @@ where impl One for MatrixN where - N: Scalar + Copy + Zero + One + ClosedMul + ClosedAdd, + N: Scalar + Clone + Zero + One + ClosedMul + ClosedAdd, DefaultAllocator: Allocator, { #[inline] @@ -748,7 +748,7 @@ where impl Bounded for MatrixMN where - N: Scalar + Copy + Bounded, + N: Scalar + Clone + Bounded, DefaultAllocator: Allocator, { #[inline] @@ -762,7 +762,7 @@ where } } -impl Distribution> for Standard +impl Distribution> for Standard where DefaultAllocator: Allocator, Standard: Distribution, @@ -781,7 +781,7 @@ impl Arbitrary for MatrixMN where R: Dim, C: Dim, - N: Scalar + Copy + Arbitrary + Send, + N: Scalar + Clone + Arbitrary + Send, DefaultAllocator: Allocator, Owned: Clone + Send, { @@ -822,7 +822,7 @@ where macro_rules! componentwise_constructors_impl( ($($R: ty, $C: ty, $($args: ident:($irow: expr,$icol: expr)),*);* $(;)*) => {$( impl MatrixMN - where N: Scalar + Copy, + where N: Scalar + Clone, DefaultAllocator: Allocator { /// Initializes this matrix from its components. #[inline] @@ -990,7 +990,7 @@ componentwise_constructors_impl!( */ impl VectorN where - N: Scalar + Copy + Zero + One, + N: Scalar + Clone + Zero + One, DefaultAllocator: Allocator, { /// The column vector with a 1 as its first component, and zero elsewhere. diff --git a/src/base/construction_slice.rs b/src/base/construction_slice.rs index 029abc69..0e1d28f8 100644 --- a/src/base/construction_slice.rs +++ b/src/base/construction_slice.rs @@ -8,7 +8,7 @@ use num_rational::Ratio; * Slice constructors. 
* */ -impl<'a, N: Scalar + Copy, R: Dim, C: Dim, RStride: Dim, CStride: Dim> +impl<'a, N: Scalar + Clone, R: Dim, C: Dim, RStride: Dim, CStride: Dim> MatrixSliceMN<'a, N, R, C, RStride, CStride> { /// Creates, without bound-checking, a matrix slice from an array and with dimensions and strides specified by generic types instances. @@ -61,7 +61,7 @@ impl<'a, N: Scalar + Copy, R: Dim, C: Dim, RStride: Dim, CStride: Dim> } } -impl<'a, N: Scalar + Copy, R: Dim, C: Dim, RStride: Dim, CStride: Dim> +impl<'a, N: Scalar + Clone, R: Dim, C: Dim, RStride: Dim, CStride: Dim> MatrixSliceMutMN<'a, N, R, C, RStride, CStride> { /// Creates, without bound-checking, a mutable matrix slice from an array and with dimensions and strides specified by generic types instances. @@ -133,7 +133,7 @@ impl<'a, N: Scalar + Copy, R: Dim, C: Dim, RStride: Dim, CStride: Dim> } } -impl<'a, N: Scalar + Copy, R: Dim, C: Dim> MatrixSliceMN<'a, N, R, C> { +impl<'a, N: Scalar + Clone, R: Dim, C: Dim> MatrixSliceMN<'a, N, R, C> { /// Creates, without bound-checking, a matrix slice from an array and with dimensions specified by generic types instances. /// /// This method is unsafe because the input data array is not checked to contain enough elements. @@ -159,7 +159,7 @@ impl<'a, N: Scalar + Copy, R: Dim, C: Dim> MatrixSliceMN<'a, N, R, C> { } } -impl<'a, N: Scalar + Copy, R: Dim, C: Dim> MatrixSliceMutMN<'a, N, R, C> { +impl<'a, N: Scalar + Clone, R: Dim, C: Dim> MatrixSliceMutMN<'a, N, R, C> { /// Creates, without bound-checking, a mutable matrix slice from an array and with dimensions specified by generic types instances. /// /// This method is unsafe because the input data array is not checked to contain enough elements. @@ -187,7 +187,7 @@ impl<'a, N: Scalar + Copy, R: Dim, C: Dim> MatrixSliceMutMN<'a, N, R, C> { macro_rules! 
impl_constructors( ($($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => { - impl<'a, N: Scalar + Copy, $($DimIdent: $DimBound),*> MatrixSliceMN<'a, N, $($Dims),*> { + impl<'a, N: Scalar + Clone, $($DimIdent: $DimBound),*> MatrixSliceMN<'a, N, $($Dims),*> { /// Creates a new matrix slice from the given data array. /// /// Panics if `data` does not contain enough elements. @@ -203,7 +203,7 @@ macro_rules! impl_constructors( } } - impl<'a, N: Scalar + Copy, $($DimIdent: $DimBound, )*> MatrixSliceMN<'a, N, $($Dims,)* Dynamic, Dynamic> { + impl<'a, N: Scalar + Clone, $($DimIdent: $DimBound, )*> MatrixSliceMN<'a, N, $($Dims,)* Dynamic, Dynamic> { /// Creates a new matrix slice with the specified strides from the given data array. /// /// Panics if `data` does not contain enough elements. @@ -244,7 +244,7 @@ impl_constructors!(Dynamic, Dynamic; macro_rules! impl_constructors_mut( ($($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => { - impl<'a, N: Scalar + Copy, $($DimIdent: $DimBound),*> MatrixSliceMutMN<'a, N, $($Dims),*> { + impl<'a, N: Scalar + Clone, $($DimIdent: $DimBound),*> MatrixSliceMutMN<'a, N, $($Dims),*> { /// Creates a new mutable matrix slice from the given data array. /// /// Panics if `data` does not contain enough elements. @@ -260,7 +260,7 @@ macro_rules! impl_constructors_mut( } } - impl<'a, N: Scalar + Copy, $($DimIdent: $DimBound, )*> MatrixSliceMutMN<'a, N, $($Dims,)* Dynamic, Dynamic> { + impl<'a, N: Scalar + Clone, $($DimIdent: $DimBound, )*> MatrixSliceMutMN<'a, N, $($Dims,)* Dynamic, Dynamic> { /// Creates a new mutable matrix slice with the specified strides from the given data array. /// /// Panics if `data` does not contain enough elements. 
diff --git a/src/base/conversion.rs b/src/base/conversion.rs index 883f3fb8..4b937556 100644 --- a/src/base/conversion.rs +++ b/src/base/conversion.rs @@ -31,8 +31,8 @@ where C1: Dim, R2: Dim, C2: Dim, - N1: Scalar + Copy, - N2: Scalar + Copy + SupersetOf, + N1: Scalar + Clone, + N2: Scalar + Clone + SupersetOf, DefaultAllocator: Allocator + Allocator + SameShapeAllocator, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, @@ -75,7 +75,7 @@ where } } -impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: Storage> IntoIterator for &'a Matrix { +impl<'a, N: Scalar + Clone, R: Dim, C: Dim, S: Storage> IntoIterator for &'a Matrix { type Item = &'a N; type IntoIter = MatrixIter<'a, N, R, C, S>; @@ -85,7 +85,7 @@ impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: Storage> IntoIterator for } } -impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: StorageMut> IntoIterator +impl<'a, N: Scalar + Clone, R: Dim, C: Dim, S: StorageMut> IntoIterator for &'a mut Matrix { type Item = &'a mut N; @@ -100,7 +100,7 @@ impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: StorageMut> IntoIterator macro_rules! impl_from_into_asref_1D( ($(($NRows: ident, $NCols: ident) => $SZ: expr);* $(;)*) => {$( impl From<[N; $SZ]> for MatrixMN - where N: Scalar + Copy, + where N: Scalar + Clone, DefaultAllocator: Allocator { #[inline] fn from(arr: [N; $SZ]) -> Self { @@ -114,7 +114,7 @@ macro_rules! impl_from_into_asref_1D( } impl Into<[N; $SZ]> for Matrix - where N: Scalar + Copy, + where N: Scalar + Clone, S: ContiguousStorage { #[inline] fn into(self) -> [N; $SZ] { @@ -128,7 +128,7 @@ macro_rules! impl_from_into_asref_1D( } impl AsRef<[N; $SZ]> for Matrix - where N: Scalar + Copy, + where N: Scalar + Clone, S: ContiguousStorage { #[inline] fn as_ref(&self) -> &[N; $SZ] { @@ -139,7 +139,7 @@ macro_rules! 
impl_from_into_asref_1D( } impl AsMut<[N; $SZ]> for Matrix - where N: Scalar + Copy, + where N: Scalar + Clone, S: ContiguousStorageMut { #[inline] fn as_mut(&mut self) -> &mut [N; $SZ] { @@ -168,7 +168,7 @@ impl_from_into_asref_1D!( macro_rules! impl_from_into_asref_2D( ($(($NRows: ty, $NCols: ty) => ($SZRows: expr, $SZCols: expr));* $(;)*) => {$( - impl From<[[N; $SZRows]; $SZCols]> for MatrixMN + impl From<[[N; $SZRows]; $SZCols]> for MatrixMN where DefaultAllocator: Allocator { #[inline] fn from(arr: [[N; $SZRows]; $SZCols]) -> Self { @@ -181,7 +181,7 @@ macro_rules! impl_from_into_asref_2D( } } - impl Into<[[N; $SZRows]; $SZCols]> for Matrix + impl Into<[[N; $SZRows]; $SZCols]> for Matrix where S: ContiguousStorage { #[inline] fn into(self) -> [[N; $SZRows]; $SZCols] { @@ -194,7 +194,7 @@ macro_rules! impl_from_into_asref_2D( } } - impl AsRef<[[N; $SZRows]; $SZCols]> for Matrix + impl AsRef<[[N; $SZRows]; $SZCols]> for Matrix where S: ContiguousStorage { #[inline] fn as_ref(&self) -> &[[N; $SZRows]; $SZCols] { @@ -204,7 +204,7 @@ macro_rules! impl_from_into_asref_2D( } } - impl AsMut<[[N; $SZRows]; $SZCols]> for Matrix + impl AsMut<[[N; $SZRows]; $SZCols]> for Matrix where S: ContiguousStorageMut { #[inline] fn as_mut(&mut self) -> &mut [[N; $SZRows]; $SZCols] { @@ -229,7 +229,7 @@ impl_from_into_asref_2D!( macro_rules! impl_from_into_mint_1D( ($($NRows: ident => $VT:ident [$SZ: expr]);* $(;)*) => {$( impl From> for MatrixMN - where N: Scalar + Copy, + where N: Scalar + Clone, DefaultAllocator: Allocator { #[inline] fn from(v: mint::$VT) -> Self { @@ -243,7 +243,7 @@ macro_rules! impl_from_into_mint_1D( } impl Into> for Matrix - where N: Scalar + Copy, + where N: Scalar + Clone, S: ContiguousStorage { #[inline] fn into(self) -> mint::$VT { @@ -257,7 +257,7 @@ macro_rules! 
impl_from_into_mint_1D( } impl AsRef> for Matrix - where N: Scalar + Copy, + where N: Scalar + Clone, S: ContiguousStorage { #[inline] fn as_ref(&self) -> &mint::$VT { @@ -268,7 +268,7 @@ macro_rules! impl_from_into_mint_1D( } impl AsMut> for Matrix - where N: Scalar + Copy, + where N: Scalar + Clone, S: ContiguousStorageMut { #[inline] fn as_mut(&mut self) -> &mut mint::$VT { @@ -292,7 +292,7 @@ impl_from_into_mint_1D!( macro_rules! impl_from_into_mint_2D( ($(($NRows: ty, $NCols: ty) => $MV:ident{ $($component:ident),* }[$SZRows: expr]);* $(;)*) => {$( impl From> for MatrixMN - where N: Scalar + Copy, + where N: Scalar + Clone, DefaultAllocator: Allocator { #[inline] fn from(m: mint::$MV) -> Self { @@ -310,7 +310,7 @@ macro_rules! impl_from_into_mint_2D( } impl Into> for MatrixMN - where N: Scalar + Copy, + where N: Scalar + Clone, DefaultAllocator: Allocator { #[inline] fn into(self) -> mint::$MV { @@ -342,7 +342,7 @@ impl_from_into_mint_2D!( impl<'a, N, R, C, RStride, CStride> From> for Matrix> where - N: Scalar + Copy, + N: Scalar + Clone, R: DimName, C: DimName, RStride: Dim, @@ -359,7 +359,7 @@ where impl<'a, N, C, RStride, CStride> From> for Matrix> where - N: Scalar + Copy, + N: Scalar + Clone, C: Dim, RStride: Dim, CStride: Dim, @@ -373,7 +373,7 @@ where impl<'a, N, R, RStride, CStride> From> for Matrix> where - N: Scalar + Copy, + N: Scalar + Clone, R: DimName, RStride: Dim, CStride: Dim, @@ -386,7 +386,7 @@ where impl<'a, N, R, C, RStride, CStride> From> for Matrix> where - N: Scalar + Copy, + N: Scalar + Clone, R: DimName, C: DimName, RStride: Dim, @@ -403,7 +403,7 @@ where impl<'a, N, C, RStride, CStride> From> for Matrix> where - N: Scalar + Copy, + N: Scalar + Clone, C: Dim, RStride: Dim, CStride: Dim, @@ -417,7 +417,7 @@ where impl<'a, N, R, RStride, CStride> From> for Matrix> where - N: Scalar + Copy, + N: Scalar + Clone, R: DimName, RStride: Dim, CStride: Dim, @@ -430,7 +430,7 @@ where impl<'a, N, R, C, RSlice, CSlice, RStride, CStride, S> 
From<&'a Matrix> for MatrixSlice<'a, N, RSlice, CSlice, RStride, CStride> where - N: Scalar + Copy, + N: Scalar + Clone, R: Dim, C: Dim, RSlice: Dim, @@ -463,7 +463,7 @@ for MatrixSlice<'a, N, RSlice, CSlice, RStride, CStride> impl<'a, N, R, C, RSlice, CSlice, RStride, CStride, S> From<&'a mut Matrix> for MatrixSlice<'a, N, RSlice, CSlice, RStride, CStride> where - N: Scalar + Copy, + N: Scalar + Clone, R: Dim, C: Dim, RSlice: Dim, @@ -496,7 +496,7 @@ for MatrixSlice<'a, N, RSlice, CSlice, RStride, CStride> impl<'a, N, R, C, RSlice, CSlice, RStride, CStride, S> From<&'a mut Matrix> for MatrixSliceMut<'a, N, RSlice, CSlice, RStride, CStride> where - N: Scalar + Copy, + N: Scalar + Clone, R: Dim, C: Dim, RSlice: Dim, diff --git a/src/base/coordinates.rs b/src/base/coordinates.rs index 38c7edac..ee6ccf91 100644 --- a/src/base/coordinates.rs +++ b/src/base/coordinates.rs @@ -24,7 +24,7 @@ macro_rules! coords_impl( #[repr(C)] #[derive(Eq, PartialEq, Clone, Hash, Debug, Copy)] #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] - pub struct $T { + pub struct $T { $(pub $comps: N),* } } @@ -32,7 +32,7 @@ macro_rules! coords_impl( macro_rules! deref_impl( ($R: ty, $C: ty; $Target: ident) => { - impl Deref for Matrix + impl Deref for Matrix where S: ContiguousStorage { type Target = $Target; @@ -42,7 +42,7 @@ macro_rules! 
deref_impl( } } - impl DerefMut for Matrix + impl DerefMut for Matrix where S: ContiguousStorageMut { #[inline] fn deref_mut(&mut self) -> &mut Self::Target { diff --git a/src/base/default_allocator.rs b/src/base/default_allocator.rs index ee6786c5..4413de27 100644 --- a/src/base/default_allocator.rs +++ b/src/base/default_allocator.rs @@ -36,7 +36,7 @@ pub struct DefaultAllocator; // Static - Static impl Allocator for DefaultAllocator where - N: Scalar + Copy, + N: Scalar + Clone, R: DimName, C: DimName, R::Value: Mul, @@ -76,7 +76,7 @@ where // Dynamic - Static // Dynamic - Dynamic #[cfg(any(feature = "std", feature = "alloc"))] -impl Allocator for DefaultAllocator { +impl Allocator for DefaultAllocator { type Buffer = VecStorage; #[inline] @@ -107,7 +107,7 @@ impl Allocator for DefaultAllocator { // Static - Dynamic #[cfg(any(feature = "std", feature = "alloc"))] -impl Allocator for DefaultAllocator { +impl Allocator for DefaultAllocator { type Buffer = VecStorage; #[inline] @@ -142,7 +142,7 @@ impl Allocator for DefaultAllocator * */ // Anything -> Static × Static -impl Reallocator for DefaultAllocator +impl Reallocator for DefaultAllocator where RFrom: Dim, CFrom: Dim, @@ -173,7 +173,7 @@ where // Static × Static -> Dynamic × Any #[cfg(any(feature = "std", feature = "alloc"))] -impl Reallocator for DefaultAllocator +impl Reallocator for DefaultAllocator where RFrom: DimName, CFrom: DimName, @@ -202,7 +202,7 @@ where // Static × Static -> Static × Dynamic #[cfg(any(feature = "std", feature = "alloc"))] -impl Reallocator for DefaultAllocator +impl Reallocator for DefaultAllocator where RFrom: DimName, CFrom: DimName, @@ -231,7 +231,7 @@ where // All conversion from a dynamic buffer to a dynamic buffer. 
#[cfg(any(feature = "std", feature = "alloc"))] -impl Reallocator +impl Reallocator for DefaultAllocator { #[inline] @@ -247,7 +247,7 @@ impl Reallocator Reallocator +impl Reallocator for DefaultAllocator { #[inline] @@ -263,7 +263,7 @@ impl Reallocator Reallocator +impl Reallocator for DefaultAllocator { #[inline] @@ -279,7 +279,7 @@ impl Reallocator Reallocator +impl Reallocator for DefaultAllocator { #[inline] diff --git a/src/base/edition.rs b/src/base/edition.rs index cc4d4295..19937e3f 100644 --- a/src/base/edition.rs +++ b/src/base/edition.rs @@ -18,7 +18,7 @@ use crate::base::storage::{Storage, StorageMut}; use crate::base::DMatrix; use crate::base::{DefaultAllocator, Matrix, MatrixMN, RowVector, Scalar, Vector}; -impl> Matrix { +impl> Matrix { /// Extracts the upper triangular part of this matrix (including the diagonal). #[inline] pub fn upper_triangle(&self) -> MatrixMN @@ -64,7 +64,7 @@ impl> Matrix> Matrix> Matrix { +impl> Matrix { /// Sets all the elements of this matrix to `val`. #[inline] pub fn fill(&mut self, val: N) { for e in self.iter_mut() { - *e = val + *e = val.inlined_clone() } } @@ -116,7 +116,7 @@ impl> Matrix> Matrix> Matrix> Matrix> Matrix> Matrix> Matrix> Matrix { +impl> Matrix { /// Copies the upper-triangle of this matrix to its lower-triangular part. /// /// This makes the matrix symmetric. Panics if the matrix is not square. @@ -264,7 +264,7 @@ impl> Matrix { for j in 0..dim { for i in j + 1..dim { unsafe { - *self.get_unchecked_mut((i, j)) = *self.get_unchecked((j, i)); + *self.get_unchecked_mut((i, j)) = self.get_unchecked((j, i)).inlined_clone(); } } } @@ -279,7 +279,7 @@ impl> Matrix { for j in 1..self.ncols() { for i in 0..j { unsafe { - *self.get_unchecked_mut((i, j)) = *self.get_unchecked((j, i)); + *self.get_unchecked_mut((i, j)) = self.get_unchecked((j, i)).inlined_clone(); } } } @@ -291,7 +291,7 @@ impl> Matrix { * FIXME: specialize all the following for slices. * */ -impl> Matrix { +impl> Matrix { /* * * Column removal. 
@@ -783,7 +783,7 @@ impl> Matrix { } if new_ncols.value() > ncols { - res.columns_range_mut(ncols..).fill(val); + res.columns_range_mut(ncols..).fill(val.inlined_clone()); } if new_nrows.value() > nrows { @@ -797,7 +797,7 @@ impl> Matrix { } #[cfg(any(feature = "std", feature = "alloc"))] -impl DMatrix { +impl DMatrix { /// Resizes this matrix in-place. /// /// The values are copied such that `self[(i, j)] == result[(i, j)]`. If the result has more @@ -814,7 +814,7 @@ impl DMatrix { } #[cfg(any(feature = "std", feature = "alloc"))] -impl MatrixMN +impl MatrixMN where DefaultAllocator: Allocator { /// Changes the number of rows of this matrix in-place. @@ -835,7 +835,7 @@ where DefaultAllocator: Allocator } #[cfg(any(feature = "std", feature = "alloc"))] -impl MatrixMN +impl MatrixMN where DefaultAllocator: Allocator { /// Changes the number of column of this matrix in-place. @@ -855,7 +855,7 @@ where DefaultAllocator: Allocator } } -unsafe fn compress_rows( +unsafe fn compress_rows( data: &mut [N], nrows: usize, ncols: usize, @@ -895,7 +895,7 @@ unsafe fn compress_rows( // Moves entries of a matrix buffer to make place for `ninsert` emty rows starting at the `i-th` row index. // The `data` buffer is assumed to contained at least `(nrows + ninsert) * ncols` elements. 
-unsafe fn extend_rows( +unsafe fn extend_rows( data: &mut [N], nrows: usize, ncols: usize, @@ -938,7 +938,7 @@ unsafe fn extend_rows( #[cfg(any(feature = "std", feature = "alloc"))] impl Extend for Matrix where - N: Scalar + Copy, + N: Scalar + Clone, R: Dim, S: Extend, { @@ -986,7 +986,7 @@ where #[cfg(any(feature = "std", feature = "alloc"))] impl Extend for Matrix where - N: Scalar + Copy, + N: Scalar + Clone, S: Extend, { /// Extend the number of rows of a `Vector` with elements @@ -1007,7 +1007,7 @@ where #[cfg(any(feature = "std", feature = "alloc"))] impl Extend> for Matrix where - N: Scalar + Copy, + N: Scalar + Clone, R: Dim, S: Extend>, RV: Dim, diff --git a/src/base/indexing.rs b/src/base/indexing.rs index 8ce52454..35b4a9ff 100644 --- a/src/base/indexing.rs +++ b/src/base/indexing.rs @@ -267,7 +267,7 @@ fn dimrange_rangetoinclusive_usize() { } /// A helper trait used for indexing operations. -pub trait MatrixIndex<'a, N: Scalar + Copy, R: Dim, C: Dim, S: Storage>: Sized { +pub trait MatrixIndex<'a, N: Scalar + Clone, R: Dim, C: Dim, S: Storage>: Sized { /// The output type returned by methods. type Output : 'a; @@ -303,7 +303,7 @@ pub trait MatrixIndex<'a, N: Scalar + Copy, R: Dim, C: Dim, S: Storage> } /// A helper trait used for indexing operations. -pub trait MatrixIndexMut<'a, N: Scalar + Copy, R: Dim, C: Dim, S: StorageMut>: MatrixIndex<'a, N, R, C, S> { +pub trait MatrixIndexMut<'a, N: Scalar + Clone, R: Dim, C: Dim, S: StorageMut>: MatrixIndex<'a, N, R, C, S> { /// The output type returned by methods. type OutputMut : 'a; @@ -432,7 +432,7 @@ pub trait MatrixIndexMut<'a, N: Scalar + Copy, R: Dim, C: Dim, S: StorageMut> Matrix +impl> Matrix { /// Produces a view of the data at the given index, or /// `None` if the index is out of bounds. 
@@ -502,7 +502,7 @@ impl> Matrix impl<'a, N, R, C, S> MatrixIndex<'a, N, R, C, S> for usize where - N: Scalar + Copy, + N: Scalar + Clone, R: Dim, C: Dim, S: Storage @@ -524,7 +524,7 @@ where impl<'a, N, R, C, S> MatrixIndexMut<'a, N, R, C, S> for usize where - N: Scalar + Copy, + N: Scalar + Clone, R: Dim, C: Dim, S: StorageMut @@ -544,7 +544,7 @@ where impl<'a, N, R, C, S> MatrixIndex<'a, N, R, C, S> for (usize, usize) where - N: Scalar + Copy, + N: Scalar + Clone, R: Dim, C: Dim, S: Storage @@ -569,7 +569,7 @@ where impl<'a, N, R, C, S> MatrixIndexMut<'a, N, R, C, S> for (usize, usize) where - N: Scalar + Copy, + N: Scalar + Clone, R: Dim, C: Dim, S: StorageMut @@ -607,7 +607,7 @@ macro_rules! impl_index_pair { { impl<'a, N, $R, $C, S, $($RTyP : $RTyPB,)* $($CTyP : $CTyPB),*> MatrixIndex<'a, N, $R, $C, S> for ($RIdx, $CIdx) where - N: Scalar + Copy, + N: Scalar + Clone, $R: Dim, $C: Dim, S: Storage, @@ -643,7 +643,7 @@ macro_rules! impl_index_pair { impl<'a, N, $R, $C, S, $($RTyP : $RTyPB,)* $($CTyP : $CTyPB),*> MatrixIndexMut<'a, N, $R, $C, S> for ($RIdx, $CIdx) where - N: Scalar + Copy, + N: Scalar + Clone, $R: Dim, $C: Dim, S: StorageMut, diff --git a/src/base/iter.rs b/src/base/iter.rs index 63e61178..1032cd2a 100644 --- a/src/base/iter.rs +++ b/src/base/iter.rs @@ -10,7 +10,7 @@ use crate::base::{Scalar, Matrix, MatrixSlice, MatrixSliceMut}; macro_rules! iterator { (struct $Name:ident for $Storage:ident.$ptr: ident -> $Ptr:ty, $Ref:ty, $SRef: ty) => { /// An iterator through a dense matrix with arbitrary strides matrix. - pub struct $Name<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + $Storage> { + pub struct $Name<'a, N: Scalar + Clone, R: Dim, C: Dim, S: 'a + $Storage> { ptr: $Ptr, inner_ptr: $Ptr, inner_end: $Ptr, @@ -21,7 +21,7 @@ macro_rules! iterator { // FIXME: we need to specialize for the case where the matrix storage is owned (in which // case the iterator is trivial because it does not have any stride). 
- impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + $Storage> $Name<'a, N, R, C, S> { + impl<'a, N: Scalar + Clone, R: Dim, C: Dim, S: 'a + $Storage> $Name<'a, N, R, C, S> { /// Creates a new iterator for the given matrix storage. pub fn new(storage: $SRef) -> $Name<'a, N, R, C, S> { let shape = storage.shape(); @@ -58,7 +58,7 @@ macro_rules! iterator { } } - impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + $Storage> Iterator + impl<'a, N: Scalar + Clone, R: Dim, C: Dim, S: 'a + $Storage> Iterator for $Name<'a, N, R, C, S> { type Item = $Ref; @@ -111,7 +111,7 @@ macro_rules! iterator { } } - impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + $Storage> ExactSizeIterator + impl<'a, N: Scalar + Clone, R: Dim, C: Dim, S: 'a + $Storage> ExactSizeIterator for $Name<'a, N, R, C, S> { #[inline] @@ -133,12 +133,12 @@ iterator!(struct MatrixIterMut for StorageMut.ptr_mut -> *mut N, &'a mut N, &'a */ #[derive(Clone)] /// An iterator through the rows of a matrix. -pub struct RowIter<'a, N: Scalar + Copy, R: Dim, C: Dim, S: Storage> { +pub struct RowIter<'a, N: Scalar + Clone, R: Dim, C: Dim, S: Storage> { mat: &'a Matrix, curr: usize } -impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + Storage> RowIter<'a, N, R, C, S> { +impl<'a, N: Scalar + Clone, R: Dim, C: Dim, S: 'a + Storage> RowIter<'a, N, R, C, S> { pub(crate) fn new(mat: &'a Matrix) -> Self { RowIter { mat, curr: 0 @@ -147,7 +147,7 @@ impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + Storage> RowIter<'a, } -impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + Storage> Iterator for RowIter<'a, N, R, C, S> { +impl<'a, N: Scalar + Clone, R: Dim, C: Dim, S: 'a + Storage> Iterator for RowIter<'a, N, R, C, S> { type Item = MatrixSlice<'a, N, U1, C, S::RStride, S::CStride>; #[inline] @@ -172,7 +172,7 @@ impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + Storage> Iterator fo } } -impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + Storage> ExactSizeIterator for RowIter<'a, N, R, C, S> { +impl<'a, N: Scalar + Clone, R: Dim, 
C: Dim, S: 'a + Storage> ExactSizeIterator for RowIter<'a, N, R, C, S> { #[inline] fn len(&self) -> usize { self.mat.nrows() - self.curr @@ -181,13 +181,13 @@ impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + Storage> ExactSizeIt /// An iterator through the mutable rows of a matrix. -pub struct RowIterMut<'a, N: Scalar + Copy, R: Dim, C: Dim, S: StorageMut> { +pub struct RowIterMut<'a, N: Scalar + Clone, R: Dim, C: Dim, S: StorageMut> { mat: *mut Matrix, curr: usize, phantom: PhantomData<&'a mut Matrix> } -impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + StorageMut> RowIterMut<'a, N, R, C, S> { +impl<'a, N: Scalar + Clone, R: Dim, C: Dim, S: 'a + StorageMut> RowIterMut<'a, N, R, C, S> { pub(crate) fn new(mat: &'a mut Matrix) -> Self { RowIterMut { mat, @@ -204,7 +204,7 @@ impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + StorageMut> RowIterM } -impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + StorageMut> Iterator for RowIterMut<'a, N, R, C, S> { +impl<'a, N: Scalar + Clone, R: Dim, C: Dim, S: 'a + StorageMut> Iterator for RowIterMut<'a, N, R, C, S> { type Item = MatrixSliceMut<'a, N, U1, C, S::RStride, S::CStride>; #[inline] @@ -229,7 +229,7 @@ impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + StorageMut> Iterator } } -impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + StorageMut> ExactSizeIterator for RowIterMut<'a, N, R, C, S> { +impl<'a, N: Scalar + Clone, R: Dim, C: Dim, S: 'a + StorageMut> ExactSizeIterator for RowIterMut<'a, N, R, C, S> { #[inline] fn len(&self) -> usize { self.nrows() - self.curr @@ -244,12 +244,12 @@ impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + StorageMut> ExactSiz */ #[derive(Clone)] /// An iterator through the columns of a matrix. 
-pub struct ColumnIter<'a, N: Scalar + Copy, R: Dim, C: Dim, S: Storage> { +pub struct ColumnIter<'a, N: Scalar + Clone, R: Dim, C: Dim, S: Storage> { mat: &'a Matrix, curr: usize } -impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + Storage> ColumnIter<'a, N, R, C, S> { +impl<'a, N: Scalar + Clone, R: Dim, C: Dim, S: 'a + Storage> ColumnIter<'a, N, R, C, S> { pub(crate) fn new(mat: &'a Matrix) -> Self { ColumnIter { mat, curr: 0 @@ -258,7 +258,7 @@ impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + Storage> ColumnIter< } -impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + Storage> Iterator for ColumnIter<'a, N, R, C, S> { +impl<'a, N: Scalar + Clone, R: Dim, C: Dim, S: 'a + Storage> Iterator for ColumnIter<'a, N, R, C, S> { type Item = MatrixSlice<'a, N, R, U1, S::RStride, S::CStride>; #[inline] @@ -283,7 +283,7 @@ impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + Storage> Iterator fo } } -impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + Storage> ExactSizeIterator for ColumnIter<'a, N, R, C, S> { +impl<'a, N: Scalar + Clone, R: Dim, C: Dim, S: 'a + Storage> ExactSizeIterator for ColumnIter<'a, N, R, C, S> { #[inline] fn len(&self) -> usize { self.mat.ncols() - self.curr @@ -292,13 +292,13 @@ impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + Storage> ExactSizeIt /// An iterator through the mutable columns of a matrix. 
-pub struct ColumnIterMut<'a, N: Scalar + Copy, R: Dim, C: Dim, S: StorageMut> { +pub struct ColumnIterMut<'a, N: Scalar + Clone, R: Dim, C: Dim, S: StorageMut> { mat: *mut Matrix, curr: usize, phantom: PhantomData<&'a mut Matrix> } -impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + StorageMut> ColumnIterMut<'a, N, R, C, S> { +impl<'a, N: Scalar + Clone, R: Dim, C: Dim, S: 'a + StorageMut> ColumnIterMut<'a, N, R, C, S> { pub(crate) fn new(mat: &'a mut Matrix) -> Self { ColumnIterMut { mat, @@ -315,7 +315,7 @@ impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + StorageMut> ColumnIt } -impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + StorageMut> Iterator for ColumnIterMut<'a, N, R, C, S> { +impl<'a, N: Scalar + Clone, R: Dim, C: Dim, S: 'a + StorageMut> Iterator for ColumnIterMut<'a, N, R, C, S> { type Item = MatrixSliceMut<'a, N, R, U1, S::RStride, S::CStride>; #[inline] @@ -340,7 +340,7 @@ impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + StorageMut> Iterator } } -impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: 'a + StorageMut> ExactSizeIterator for ColumnIterMut<'a, N, R, C, S> { +impl<'a, N: Scalar + Clone, R: Dim, C: Dim, S: 'a + StorageMut> ExactSizeIterator for ColumnIterMut<'a, N, R, C, S> { #[inline] fn len(&self) -> usize { self.ncols() - self.curr diff --git a/src/base/matrix.rs b/src/base/matrix.rs index 08c22cbb..7682f955 100644 --- a/src/base/matrix.rs +++ b/src/base/matrix.rs @@ -73,7 +73,7 @@ pub type MatrixCross = /// some concrete types for `N` and a compatible data storage type `S`). #[repr(C)] #[derive(Clone, Copy)] -pub struct Matrix { +pub struct Matrix { /// The data storage that contains all the matrix components and informations about its number /// of rows and column (if needed). 
pub data: S, @@ -81,7 +81,7 @@ pub struct Matrix { _phantoms: PhantomData<(N, R, C)>, } -impl fmt::Debug for Matrix { +impl fmt::Debug for Matrix { fn fmt(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::Error> { formatter .debug_struct("Matrix") @@ -93,7 +93,7 @@ impl fmt::Debug for Matrix Serialize for Matrix where - N: Scalar + Copy, + N: Scalar + Clone, R: Dim, C: Dim, S: Serialize, @@ -107,7 +107,7 @@ where #[cfg(feature = "serde-serialize")] impl<'de, N, R, C, S> Deserialize<'de> for Matrix where - N: Scalar + Copy, + N: Scalar + Clone, R: Dim, C: Dim, S: Deserialize<'de>, @@ -122,7 +122,7 @@ where } #[cfg(feature = "abomonation-serialize")] -impl Abomonation for Matrix { +impl Abomonation for Matrix { unsafe fn entomb(&self, writer: &mut W) -> IOResult<()> { self.data.entomb(writer) } @@ -136,7 +136,7 @@ impl Abomonation for Matrix Matrix { +impl Matrix { /// Creates a new matrix with the given data without statically checking that the matrix /// dimension matches the storage dimension. #[inline] @@ -148,7 +148,7 @@ impl Matrix { } } -impl> Matrix { +impl> Matrix { /// Creates a new matrix with the given data. #[inline] pub fn from_data(data: S) -> Self { @@ -403,7 +403,7 @@ impl> Matrix { for j in 0..res.ncols() { for i in 0..res.nrows() { unsafe { - *res.get_unchecked_mut((i, j)) = *self.get_unchecked((i, j)); + *res.get_unchecked_mut((i, j)) = self.get_unchecked((i, j)).inlined_clone(); } } } @@ -413,7 +413,7 @@ impl> Matrix { /// Returns a matrix containing the result of `f` applied to each of its entries. 
#[inline] - pub fn map N2>(&self, mut f: F) -> MatrixMN + pub fn map N2>(&self, mut f: F) -> MatrixMN where DefaultAllocator: Allocator { let (nrows, ncols) = self.data.shape(); @@ -422,7 +422,7 @@ impl> Matrix { for j in 0..ncols.value() { for i in 0..nrows.value() { unsafe { - let a = *self.data.get_unchecked(i, j); + let a = self.data.get_unchecked(i, j).inlined_clone(); *res.data.get_unchecked_mut(i, j) = f(a) } } @@ -434,7 +434,7 @@ impl> Matrix { /// Returns a matrix containing the result of `f` applied to each of its entries. Unlike `map`, /// `f` also gets passed the row and column index, i.e. `f(row, col, value)`. #[inline] - pub fn map_with_location N2>( + pub fn map_with_location N2>( &self, mut f: F, ) -> MatrixMN @@ -448,7 +448,7 @@ impl> Matrix { for j in 0..ncols.value() { for i in 0..nrows.value() { unsafe { - let a = *self.data.get_unchecked(i, j); + let a = self.data.get_unchecked(i, j).inlined_clone(); *res.data.get_unchecked_mut(i, j) = f(i, j, a) } } @@ -462,8 +462,8 @@ impl> Matrix { #[inline] pub fn zip_map(&self, rhs: &Matrix, mut f: F) -> MatrixMN where - N2: Scalar + Copy, - N3: Scalar + Copy, + N2: Scalar + Clone, + N3: Scalar + Clone, S2: Storage, F: FnMut(N, N2) -> N3, DefaultAllocator: Allocator, @@ -480,8 +480,8 @@ impl> Matrix { for j in 0..ncols.value() { for i in 0..nrows.value() { unsafe { - let a = *self.data.get_unchecked(i, j); - let b = *rhs.data.get_unchecked(i, j); + let a = self.data.get_unchecked(i, j).inlined_clone(); + let b = rhs.data.get_unchecked(i, j).inlined_clone(); *res.data.get_unchecked_mut(i, j) = f(a, b) } } @@ -500,9 +500,9 @@ impl> Matrix { mut f: F, ) -> MatrixMN where - N2: Scalar + Copy, - N3: Scalar + Copy, - N4: Scalar + Copy, + N2: Scalar + Clone, + N3: Scalar + Clone, + N4: Scalar + Clone, S2: Storage, S3: Storage, F: FnMut(N, N2, N3) -> N4, @@ -521,9 +521,9 @@ impl> Matrix { for j in 0..ncols.value() { for i in 0..nrows.value() { unsafe { - let a = *self.data.get_unchecked(i, j); - let b = 
*b.data.get_unchecked(i, j); - let c = *c.data.get_unchecked(i, j); + let a = self.data.get_unchecked(i, j).inlined_clone(); + let b = b.data.get_unchecked(i, j).inlined_clone(); + let c = c.data.get_unchecked(i, j).inlined_clone(); *res.data.get_unchecked_mut(i, j) = f(a, b, c) } } @@ -542,7 +542,7 @@ impl> Matrix { for j in 0..ncols.value() { for i in 0..nrows.value() { unsafe { - let a = *self.data.get_unchecked(i, j); + let a = self.data.get_unchecked(i, j).inlined_clone(); res = f(res, a) } } @@ -555,7 +555,7 @@ impl> Matrix { #[inline] pub fn zip_fold(&self, rhs: &Matrix, init: Acc, mut f: impl FnMut(Acc, N, N2) -> Acc) -> Acc where - N2: Scalar + Copy, + N2: Scalar + Clone, R2: Dim, C2: Dim, S2: Storage, @@ -573,8 +573,8 @@ impl> Matrix { for j in 0..ncols.value() { for i in 0..nrows.value() { unsafe { - let a = *self.data.get_unchecked(i, j); - let b = *rhs.data.get_unchecked(i, j); + let a = self.data.get_unchecked(i, j).inlined_clone(); + let b = rhs.data.get_unchecked(i, j).inlined_clone(); res = f(res, a, b) } } @@ -602,7 +602,7 @@ impl> Matrix { for i in 0..nrows { for j in 0..ncols { unsafe { - *out.get_unchecked_mut((j, i)) = *self.get_unchecked((i, j)); + *out.get_unchecked_mut((j, i)) = self.get_unchecked((i, j)).inlined_clone(); } } } @@ -623,7 +623,7 @@ impl> Matrix { } } -impl> Matrix { +impl> Matrix { /// Mutably iterates through this matrix coordinates. 
#[inline] pub fn iter_mut(&mut self) -> MatrixIterMut { @@ -717,7 +717,7 @@ impl> Matrix> Matrix> Matrix> Matrix> Matrix(&mut self, rhs: &Matrix, mut f: impl FnMut(N, N2) -> N) - where N2: Scalar + Copy, + where N2: Scalar + Clone, R2: Dim, C2: Dim, S2: Storage, @@ -813,8 +813,8 @@ impl> Matrix> Matrix(&mut self, b: &Matrix, c: &Matrix, mut f: impl FnMut(N, N2, N3) -> N) - where N2: Scalar + Copy, + where N2: Scalar + Clone, R2: Dim, C2: Dim, S2: Storage, - N3: Scalar + Copy, + N3: Scalar + Clone, R3: Dim, C3: Dim, S3: Storage, @@ -850,16 +850,16 @@ impl> Matrix> Vector { +impl> Vector { /// Gets a reference to the i-th element of this column vector without bound checking. #[inline] pub unsafe fn vget_unchecked(&self, i: usize) -> &N { @@ -869,7 +869,7 @@ impl> Vector { } } -impl> Vector { +impl> Vector { /// Gets a mutable reference to the i-th element of this column vector without bound checking. #[inline] pub unsafe fn vget_unchecked_mut(&mut self, i: usize) -> &mut N { @@ -879,7 +879,7 @@ impl> Vector { } } -impl> Matrix { +impl> Matrix { /// Extracts a slice containing the entire matrix entries ordered column-by-columns. #[inline] pub fn as_slice(&self) -> &[N] { @@ -887,7 +887,7 @@ impl> Matrix> Matrix { +impl> Matrix { /// Extracts a mutable slice containing the entire matrix entries ordered column-by-columns. #[inline] pub fn as_mut_slice(&mut self) -> &mut [N] { @@ -895,7 +895,7 @@ impl> Matrix< } } -impl> Matrix { +impl> Matrix { /// Transposes the square matrix `self` in-place. pub fn transpose_mut(&mut self) { assert!( @@ -1052,7 +1052,7 @@ impl> Matrix { } } -impl> SquareMatrix { +impl> SquareMatrix { /// The diagonal of this matrix. #[inline] pub fn diagonal(&self) -> VectorN @@ -1064,7 +1064,7 @@ impl> SquareMatrix { /// /// This is a more efficient version of `self.diagonal().map(f)` since this /// allocates only once. 
- pub fn map_diagonal(&self, mut f: impl FnMut(N) -> N2) -> VectorN + pub fn map_diagonal(&self, mut f: impl FnMut(N) -> N2) -> VectorN where DefaultAllocator: Allocator { assert!( self.is_square(), @@ -1076,7 +1076,7 @@ impl> SquareMatrix { for i in 0..dim.value() { unsafe { - *res.vget_unchecked_mut(i) = f(*self.get_unchecked((i, i))); + *res.vget_unchecked_mut(i) = f(self.get_unchecked((i, i)).inlined_clone()); } } @@ -1096,7 +1096,7 @@ impl> SquareMatrix { let mut res = N::zero(); for i in 0..dim.value() { - res += unsafe { *self.get_unchecked((i, i)) }; + res += unsafe { self.get_unchecked((i, i)).inlined_clone() }; } res @@ -1128,7 +1128,7 @@ impl> SquareMatrix { } } -impl + IsNotStaticOne, S: Storage> Matrix { +impl + IsNotStaticOne, S: Storage> Matrix { /// Yields the homogeneous matrix for this matrix, i.e., appending an additional dimension and /// and setting the diagonal element to `1`. @@ -1144,7 +1144,7 @@ impl + IsNotStaticOne, S: Storage, S: Storage> Vector { +impl, S: Storage> Vector { /// Computes the coordinates in projective space of this vector, i.e., appends a `0` to its /// coordinates. #[inline] @@ -1170,7 +1170,7 @@ impl, S: Storage> Vector { } } -impl, S: Storage> Vector { +impl, S: Storage> Vector { /// Constructs a new vector of higher dimension by appending `element` to the end of `self`. 
#[inline] pub fn push(&self, element: N) -> VectorN> @@ -1188,7 +1188,7 @@ impl, S: Storage> Vector { impl AbsDiffEq for Matrix where - N: Scalar + Copy + AbsDiffEq, + N: Scalar + Clone + AbsDiffEq, S: Storage, N::Epsilon: Copy, { @@ -1209,7 +1209,7 @@ where impl RelativeEq for Matrix where - N: Scalar + Copy + RelativeEq, + N: Scalar + Clone + RelativeEq, S: Storage, N::Epsilon: Copy, { @@ -1232,7 +1232,7 @@ where impl UlpsEq for Matrix where - N: Scalar + Copy + UlpsEq, + N: Scalar + Clone + UlpsEq, S: Storage, N::Epsilon: Copy, { @@ -1252,7 +1252,7 @@ where impl PartialOrd for Matrix where - N: Scalar + Copy + PartialOrd, + N: Scalar + Clone + PartialOrd, S: Storage, { #[inline] @@ -1340,13 +1340,13 @@ where impl Eq for Matrix where - N: Scalar + Copy + Eq, + N: Scalar + Clone + Eq, S: Storage, {} impl PartialEq for Matrix where - N: Scalar + Copy, + N: Scalar + Clone, S: Storage, { #[inline] @@ -1363,13 +1363,13 @@ macro_rules! impl_fmt { ($trait: path, $fmt_str_without_precision: expr, $fmt_str_with_precision: expr) => { impl $trait for Matrix where - N: Scalar + Copy + $trait, + N: Scalar + Clone + $trait, S: Storage, DefaultAllocator: Allocator, { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { #[cfg(feature = "std")] - fn val_width(val: N, f: &mut fmt::Formatter) -> usize { + fn val_width(val: &N, f: &mut fmt::Formatter) -> usize { match f.precision() { Some(precision) => format!($fmt_str_with_precision, val, precision).chars().count(), None => format!($fmt_str_without_precision, val).chars().count(), @@ -1377,7 +1377,7 @@ macro_rules! impl_fmt { } #[cfg(not(feature = "std"))] - fn val_width(_: N, _: &mut fmt::Formatter) -> usize { + fn val_width(_: &N, _: &mut fmt::Formatter) -> usize { 4 } @@ -1393,7 +1393,7 @@ macro_rules! 
impl_fmt { for i in 0..nrows { for j in 0..ncols { - lengths[(i, j)] = val_width(self[(i, j)], f); + lengths[(i, j)] = val_width(&self[(i, j)], f); max_length = crate::max(max_length, lengths[(i, j)]); } } @@ -1454,7 +1454,7 @@ fn lower_exp() { ") } -impl> Matrix { +impl> Matrix { /// The perpendicular product between two 2D column vectors, i.e. `a.x * b.y - a.y * b.x`. #[inline] pub fn perp(&self, b: &Matrix) -> N @@ -1470,8 +1470,8 @@ impl> Matrix> Matrix::from_usize(1); let mut res = Matrix::new_uninitialized_generic(nrows, ncols); - let ax = *self.get_unchecked((0, 0)); - let ay = *self.get_unchecked((1, 0)); - let az = *self.get_unchecked((2, 0)); + let ax = self.get_unchecked((0, 0)); + let ay = self.get_unchecked((1, 0)); + let az = self.get_unchecked((2, 0)); - let bx = *b.get_unchecked((0, 0)); - let by = *b.get_unchecked((1, 0)); - let bz = *b.get_unchecked((2, 0)); + let bx = b.get_unchecked((0, 0)); + let by = b.get_unchecked((1, 0)); + let bz = b.get_unchecked((2, 0)); - *res.get_unchecked_mut((0, 0)) = ay * bz - az * by; - *res.get_unchecked_mut((1, 0)) = az * bx - ax * bz; - *res.get_unchecked_mut((2, 0)) = ax * by - ay * bx; + *res.get_unchecked_mut((0, 0)) = ay.inlined_clone() * bz.inlined_clone() - az.inlined_clone() * by.inlined_clone(); + *res.get_unchecked_mut((1, 0)) = az.inlined_clone() * bx.inlined_clone() - ax.inlined_clone() * bz.inlined_clone(); + *res.get_unchecked_mut((2, 0)) = ax.inlined_clone() * by.inlined_clone() - ay.inlined_clone() * bx.inlined_clone(); res } @@ -1527,17 +1527,17 @@ impl> Matrix::from_usize(3); let mut res = Matrix::new_uninitialized_generic(nrows, ncols); - let ax = *self.get_unchecked((0, 0)); - let ay = *self.get_unchecked((0, 1)); - let az = *self.get_unchecked((0, 2)); + let ax = self.get_unchecked((0, 0)); + let ay = self.get_unchecked((0, 1)); + let az = self.get_unchecked((0, 2)); - let bx = *b.get_unchecked((0, 0)); - let by = *b.get_unchecked((0, 1)); - let bz = *b.get_unchecked((0, 2)); + let bx = 
b.get_unchecked((0, 0)); + let by = b.get_unchecked((0, 1)); + let bz = b.get_unchecked((0, 2)); - *res.get_unchecked_mut((0, 0)) = ay * bz - az * by; - *res.get_unchecked_mut((0, 1)) = az * bx - ax * bz; - *res.get_unchecked_mut((0, 2)) = ax * by - ay * bx; + *res.get_unchecked_mut((0, 0)) = ay.inlined_clone() * bz.inlined_clone() - az.inlined_clone() * by.inlined_clone(); + *res.get_unchecked_mut((0, 1)) = az.inlined_clone() * bx.inlined_clone() - ax.inlined_clone() * bz.inlined_clone(); + *res.get_unchecked_mut((0, 2)) = ax.inlined_clone() * by.inlined_clone() - ay.inlined_clone() * bx.inlined_clone(); res } @@ -1545,7 +1545,7 @@ impl> Matrix> Vector +impl> Vector where DefaultAllocator: Allocator { /// Computes the matrix `M` such that for all vector `v` we have `M * v == self.cross(&v)`. @@ -1553,13 +1553,13 @@ where DefaultAllocator: Allocator pub fn cross_matrix(&self) -> MatrixN { MatrixN::::new( N::zero(), - -self[2], - self[1], - self[2], + -self[2].inlined_clone(), + self[1].inlined_clone(), + self[2].inlined_clone(), N::zero(), - -self[0], - -self[1], - self[0], + -self[0].inlined_clone(), + -self[1].inlined_clone(), + self[0].inlined_clone(), N::zero(), ) } @@ -1593,7 +1593,7 @@ impl> Matrix { } } -impl> +impl> Vector { /// Returns `self * (1.0 - t) + rhs * t`, i.e., the linear blend of the vectors x and y using the scalar value a. 
@@ -1611,7 +1611,7 @@ impl>(&self, rhs: &Vector, t: N) -> VectorN where DefaultAllocator: Allocator { let mut res = self.clone_owned(); - res.axpy(t, rhs, N::one() - t); + res.axpy(t.inlined_clone(), rhs, N::one() - t); res } } @@ -1683,7 +1683,7 @@ impl> Unit> { impl AbsDiffEq for Unit> where - N: Scalar + Copy + AbsDiffEq, + N: Scalar + Clone + AbsDiffEq, S: Storage, N::Epsilon: Copy, { @@ -1702,7 +1702,7 @@ where impl RelativeEq for Unit> where - N: Scalar + Copy + RelativeEq, + N: Scalar + Clone + RelativeEq, S: Storage, N::Epsilon: Copy, { @@ -1726,7 +1726,7 @@ where impl UlpsEq for Unit> where - N: Scalar + Copy + UlpsEq, + N: Scalar + Clone + UlpsEq, S: Storage, N::Epsilon: Copy, { @@ -1743,7 +1743,7 @@ where impl Hash for Matrix where - N: Scalar + Copy + Hash, + N: Scalar + Clone + Hash, R: Dim, C: Dim, S: Storage, diff --git a/src/base/matrix_alga.rs b/src/base/matrix_alga.rs index 330c5f94..b275ce2e 100644 --- a/src/base/matrix_alga.rs +++ b/src/base/matrix_alga.rs @@ -25,7 +25,7 @@ use crate::base::{DefaultAllocator, MatrixMN, MatrixN, Scalar}; */ impl Identity for MatrixMN where - N: Scalar + Copy + Zero, + N: Scalar + Clone + Zero, DefaultAllocator: Allocator, { #[inline] @@ -36,7 +36,7 @@ where impl AbstractMagma for MatrixMN where - N: Scalar + Copy + ClosedAdd, + N: Scalar + Clone + ClosedAdd, DefaultAllocator: Allocator, { #[inline] @@ -47,7 +47,7 @@ where impl TwoSidedInverse for MatrixMN where - N: Scalar + Copy + ClosedNeg, + N: Scalar + Clone + ClosedNeg, DefaultAllocator: Allocator, { #[inline] @@ -64,7 +64,7 @@ where macro_rules! 
inherit_additive_structure( ($($marker: ident<$operator: ident> $(+ $bounds: ident)*),* $(,)*) => {$( impl $marker<$operator> for MatrixMN - where N: Scalar + Copy + $marker<$operator> $(+ $bounds)*, + where N: Scalar + Clone + $marker<$operator> $(+ $bounds)*, DefaultAllocator: Allocator { } )*} ); @@ -80,7 +80,7 @@ inherit_additive_structure!( impl AbstractModule for MatrixMN where - N: Scalar + Copy + RingCommutative, + N: Scalar + Clone + RingCommutative, DefaultAllocator: Allocator, { type AbstractRing = N; @@ -93,7 +93,7 @@ where impl Module for MatrixMN where - N: Scalar + Copy + RingCommutative, + N: Scalar + Clone + RingCommutative, DefaultAllocator: Allocator, { type Ring = N; @@ -101,7 +101,7 @@ where impl VectorSpace for MatrixMN where - N: Scalar + Copy + Field, + N: Scalar + Clone + Field, DefaultAllocator: Allocator, { type Field = N; @@ -109,7 +109,7 @@ where impl FiniteDimVectorSpace for MatrixMN where - N: Scalar + Copy + Field, + N: Scalar + Clone + Field, DefaultAllocator: Allocator, { #[inline] @@ -329,7 +329,7 @@ where DefaultAllocator: Allocator */ impl Identity for MatrixN where - N: Scalar + Copy + Zero + One, + N: Scalar + Clone + Zero + One, DefaultAllocator: Allocator, { #[inline] @@ -340,7 +340,7 @@ where impl AbstractMagma for MatrixN where - N: Scalar + Copy + Zero + One + ClosedAdd + ClosedMul, + N: Scalar + Clone + Zero + One + ClosedAdd + ClosedMul, DefaultAllocator: Allocator, { #[inline] @@ -352,7 +352,7 @@ where macro_rules! 
impl_multiplicative_structure( ($($marker: ident<$operator: ident> $(+ $bounds: ident)*),* $(,)*) => {$( impl $marker<$operator> for MatrixN - where N: Scalar + Copy + Zero + One + ClosedAdd + ClosedMul + $marker<$operator> $(+ $bounds)*, + where N: Scalar + Clone + Zero + One + ClosedAdd + ClosedMul + $marker<$operator> $(+ $bounds)*, DefaultAllocator: Allocator { } )*} ); @@ -369,7 +369,7 @@ impl_multiplicative_structure!( */ impl MeetSemilattice for MatrixMN where - N: Scalar + Copy + MeetSemilattice, + N: Scalar + Clone + MeetSemilattice, DefaultAllocator: Allocator, { #[inline] @@ -380,7 +380,7 @@ where impl JoinSemilattice for MatrixMN where - N: Scalar + Copy + JoinSemilattice, + N: Scalar + Clone + JoinSemilattice, DefaultAllocator: Allocator, { #[inline] @@ -391,7 +391,7 @@ where impl Lattice for MatrixMN where - N: Scalar + Copy + Lattice, + N: Scalar + Clone + Lattice, DefaultAllocator: Allocator, { #[inline] diff --git a/src/base/matrix_slice.rs b/src/base/matrix_slice.rs index 43131680..1ab3d487 100644 --- a/src/base/matrix_slice.rs +++ b/src/base/matrix_slice.rs @@ -13,22 +13,22 @@ macro_rules! 
slice_storage_impl( ($doc: expr; $Storage: ident as $SRef: ty; $T: ident.$get_addr: ident ($Ptr: ty as $Ref: ty)) => { #[doc = $doc] #[derive(Debug)] - pub struct $T<'a, N: Scalar + Copy, R: Dim, C: Dim, RStride: Dim, CStride: Dim> { + pub struct $T<'a, N: Scalar + Clone, R: Dim, C: Dim, RStride: Dim, CStride: Dim> { ptr: $Ptr, shape: (R, C), strides: (RStride, CStride), _phantoms: PhantomData<$Ref>, } - unsafe impl<'a, N: Scalar + Copy + Send, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Send + unsafe impl<'a, N: Scalar + Clone + Send, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Send for $T<'a, N, R, C, RStride, CStride> {} - unsafe impl<'a, N: Scalar + Copy + Sync, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Sync + unsafe impl<'a, N: Scalar + Clone + Sync, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Sync for $T<'a, N, R, C, RStride, CStride> {} - impl<'a, N: Scalar + Copy, R: Dim, C: Dim, RStride: Dim, CStride: Dim> $T<'a, N, R, C, RStride, CStride> { + impl<'a, N: Scalar + Clone, R: Dim, C: Dim, RStride: Dim, CStride: Dim> $T<'a, N, R, C, RStride, CStride> { /// Create a new matrix slice without bound checking and from a raw pointer. #[inline] pub unsafe fn from_raw_parts(ptr: $Ptr, @@ -48,7 +48,7 @@ macro_rules! slice_storage_impl( } // Dynamic is arbitrary. It's just to be able to call the constructors with `Slice::` - impl<'a, N: Scalar + Copy, R: Dim, C: Dim> $T<'a, N, R, C, Dynamic, Dynamic> { + impl<'a, N: Scalar + Clone, R: Dim, C: Dim> $T<'a, N, R, C, Dynamic, Dynamic> { /// Create a new matrix slice without bound checking. #[inline] pub unsafe fn new_unchecked(storage: $SRef, start: (usize, usize), shape: (R, C)) @@ -89,12 +89,12 @@ slice_storage_impl!("A mutable matrix data storage for mutable matrix slice. 
Onl StorageMut as &'a mut S; SliceStorageMut.get_address_unchecked_mut(*mut N as &'a mut N) ); -impl<'a, N: Scalar + Copy, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Copy +impl<'a, N: Scalar + Clone, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Copy for SliceStorage<'a, N, R, C, RStride, CStride> { } -impl<'a, N: Scalar + Copy, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Clone +impl<'a, N: Scalar + Clone, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Clone for SliceStorage<'a, N, R, C, RStride, CStride> { #[inline] @@ -110,7 +110,7 @@ impl<'a, N: Scalar + Copy, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Clone macro_rules! storage_impl( ($($T: ident),* $(,)*) => {$( - unsafe impl<'a, N: Scalar + Copy, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Storage + unsafe impl<'a, N: Scalar + Clone, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Storage for $T<'a, N, R, C, RStride, CStride> { type RStride = RStride; @@ -178,7 +178,7 @@ macro_rules! storage_impl( storage_impl!(SliceStorage, SliceStorageMut); -unsafe impl<'a, N: Scalar + Copy, R: Dim, C: Dim, RStride: Dim, CStride: Dim> StorageMut +unsafe impl<'a, N: Scalar + Clone, R: Dim, C: Dim, RStride: Dim, CStride: Dim> StorageMut for SliceStorageMut<'a, N, R, C, RStride, CStride> { #[inline] @@ -198,15 +198,15 @@ unsafe impl<'a, N: Scalar + Copy, R: Dim, C: Dim, RStride: Dim, CStride: Dim> St } } -unsafe impl<'a, N: Scalar + Copy, R: Dim, CStride: Dim> ContiguousStorage for SliceStorage<'a, N, R, U1, U1, CStride> { } -unsafe impl<'a, N: Scalar + Copy, R: Dim, CStride: Dim> ContiguousStorage for SliceStorageMut<'a, N, R, U1, U1, CStride> { } -unsafe impl<'a, N: Scalar + Copy, R: Dim, CStride: Dim> ContiguousStorageMut for SliceStorageMut<'a, N, R, U1, U1, CStride> { } +unsafe impl<'a, N: Scalar + Clone, R: Dim, CStride: Dim> ContiguousStorage for SliceStorage<'a, N, R, U1, U1, CStride> { } +unsafe impl<'a, N: Scalar + Clone, R: Dim, CStride: Dim> ContiguousStorage for SliceStorageMut<'a, N, R, U1, U1, CStride> { } +unsafe 
impl<'a, N: Scalar + Clone, R: Dim, CStride: Dim> ContiguousStorageMut for SliceStorageMut<'a, N, R, U1, U1, CStride> { } -unsafe impl<'a, N: Scalar + Copy, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorage for SliceStorage<'a, N, R, C, U1, R> { } -unsafe impl<'a, N: Scalar + Copy, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorage for SliceStorageMut<'a, N, R, C, U1, R> { } -unsafe impl<'a, N: Scalar + Copy, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorageMut for SliceStorageMut<'a, N, R, C, U1, R> { } +unsafe impl<'a, N: Scalar + Clone, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorage for SliceStorage<'a, N, R, C, U1, R> { } +unsafe impl<'a, N: Scalar + Clone, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorage for SliceStorageMut<'a, N, R, C, U1, R> { } +unsafe impl<'a, N: Scalar + Clone, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorageMut for SliceStorageMut<'a, N, R, C, U1, R> { } -impl> Matrix { +impl> Matrix { #[inline] fn assert_slice_index( &self, @@ -261,7 +261,7 @@ macro_rules! matrix_slice_impl( pub type $MatrixSlice<'a, N, R, C, RStride, CStride> = Matrix>; - impl> Matrix { + impl> Matrix { /* * * Row slicing. @@ -786,7 +786,7 @@ impl SliceRange for RangeFull { } } -impl> Matrix { +impl> Matrix { /// Slices a sub-matrix containing the rows indexed by the range `rows` and the columns indexed /// by the range `cols`. #[inline] @@ -827,7 +827,7 @@ impl> Matrix { } } -impl> Matrix { +impl> Matrix { /// Slices a mutable sub-matrix containing the rows indexed by the range `rows` and the columns /// indexed by the range `cols`. 
pub fn slice_range_mut( @@ -871,7 +871,7 @@ impl> Matrix From> for MatrixSlice<'a, N, R, C, RStride, CStride> where - N: Scalar + Copy, + N: Scalar + Clone, R: Dim, C: Dim, RStride: Dim, diff --git a/src/base/ops.rs b/src/base/ops.rs index 77b35e94..9f785daa 100644 --- a/src/base/ops.rs +++ b/src/base/ops.rs @@ -20,7 +20,7 @@ use crate::base::{DefaultAllocator, Matrix, MatrixMN, MatrixN, MatrixSum, Scalar * Indexing. * */ -impl> Index for Matrix { +impl> Index for Matrix { type Output = N; #[inline] @@ -32,7 +32,7 @@ impl> Index for Mat impl Index<(usize, usize)> for Matrix where - N: Scalar + Copy, + N: Scalar + Clone, S: Storage, { type Output = N; @@ -50,7 +50,7 @@ where } // Mutable versions. -impl> IndexMut for Matrix { +impl> IndexMut for Matrix { #[inline] fn index_mut(&mut self, i: usize) -> &mut N { let ij = self.vector_to_matrix_index(i); @@ -60,7 +60,7 @@ impl> IndexMut f impl IndexMut<(usize, usize)> for Matrix where - N: Scalar + Copy, + N: Scalar + Clone, S: StorageMut, { #[inline] @@ -82,7 +82,7 @@ where */ impl Neg for Matrix where - N: Scalar + Copy + ClosedNeg, + N: Scalar + Clone + ClosedNeg, S: Storage, DefaultAllocator: Allocator, { @@ -98,7 +98,7 @@ where impl<'a, N, R: Dim, C: Dim, S> Neg for &'a Matrix where - N: Scalar + Copy + ClosedNeg, + N: Scalar + Clone + ClosedNeg, S: Storage, DefaultAllocator: Allocator, { @@ -112,14 +112,14 @@ where impl Matrix where - N: Scalar + Copy + ClosedNeg, + N: Scalar + Clone + ClosedNeg, S: StorageMut, { /// Negates `self` in-place. #[inline] pub fn neg_mut(&mut self) { for e in self.iter_mut() { - *e = -*e + *e = -e.inlined_clone() } } } @@ -137,7 +137,7 @@ macro_rules! componentwise_binop_impl( $method_to: ident, $method_to_statically_unchecked: ident) => { impl> Matrix - where N: Scalar + Copy + $bound { + where N: Scalar + Clone + $bound { /* * @@ -164,7 +164,7 @@ macro_rules! componentwise_binop_impl( let out = out.data.as_mut_slice(); for i in 0 .. 
arr1.len() { unsafe { - *out.get_unchecked_mut(i) = arr1.get_unchecked(i).$method(*arr2.get_unchecked(i)); + *out.get_unchecked_mut(i) = arr1.get_unchecked(i).inlined_clone().$method(arr2.get_unchecked(i).inlined_clone()); } } } @@ -172,7 +172,7 @@ macro_rules! componentwise_binop_impl( for j in 0 .. self.ncols() { for i in 0 .. self.nrows() { unsafe { - let val = self.get_unchecked((i, j)).$method(*rhs.get_unchecked((i, j))); + let val = self.get_unchecked((i, j)).inlined_clone().$method(rhs.get_unchecked((i, j)).inlined_clone()); *out.get_unchecked_mut((i, j)) = val; } } @@ -196,7 +196,7 @@ macro_rules! componentwise_binop_impl( let arr2 = rhs.data.as_slice(); for i in 0 .. arr2.len() { unsafe { - arr1.get_unchecked_mut(i).$method_assign(*arr2.get_unchecked(i)); + arr1.get_unchecked_mut(i).$method_assign(arr2.get_unchecked(i).inlined_clone()); } } } @@ -204,7 +204,7 @@ macro_rules! componentwise_binop_impl( for j in 0 .. rhs.ncols() { for i in 0 .. rhs.nrows() { unsafe { - self.get_unchecked_mut((i, j)).$method_assign(*rhs.get_unchecked((i, j))) + self.get_unchecked_mut((i, j)).$method_assign(rhs.get_unchecked((i, j)).inlined_clone()) } } } @@ -226,7 +226,7 @@ macro_rules! componentwise_binop_impl( let arr2 = rhs.data.as_mut_slice(); for i in 0 .. arr1.len() { unsafe { - let res = arr1.get_unchecked(i).$method(*arr2.get_unchecked(i)); + let res = arr1.get_unchecked(i).inlined_clone().$method(arr2.get_unchecked(i).inlined_clone()); *arr2.get_unchecked_mut(i) = res; } } @@ -236,7 +236,7 @@ macro_rules! componentwise_binop_impl( for i in 0 .. self.nrows() { unsafe { let r = rhs.get_unchecked_mut((i, j)); - *r = self.get_unchecked((i, j)).$method(*r) + *r = self.get_unchecked((i, j)).inlined_clone().$method(r.inlined_clone()) } } } @@ -267,7 +267,7 @@ macro_rules! 
componentwise_binop_impl( impl<'b, N, R1, C1, R2, C2, SA, SB> $Trait<&'b Matrix> for Matrix where R1: Dim, C1: Dim, R2: Dim, C2: Dim, - N: Scalar + Copy + $bound, + N: Scalar + Clone + $bound, SA: Storage, SB: Storage, DefaultAllocator: SameShapeAllocator, @@ -285,7 +285,7 @@ macro_rules! componentwise_binop_impl( impl<'a, N, R1, C1, R2, C2, SA, SB> $Trait> for &'a Matrix where R1: Dim, C1: Dim, R2: Dim, C2: Dim, - N: Scalar + Copy + $bound, + N: Scalar + Clone + $bound, SA: Storage, SB: Storage, DefaultAllocator: SameShapeAllocator, @@ -303,7 +303,7 @@ macro_rules! componentwise_binop_impl( impl $Trait> for Matrix where R1: Dim, C1: Dim, R2: Dim, C2: Dim, - N: Scalar + Copy + $bound, + N: Scalar + Clone + $bound, SA: Storage, SB: Storage, DefaultAllocator: SameShapeAllocator, @@ -318,7 +318,7 @@ macro_rules! componentwise_binop_impl( impl<'a, 'b, N, R1, C1, R2, C2, SA, SB> $Trait<&'b Matrix> for &'a Matrix where R1: Dim, C1: Dim, R2: Dim, C2: Dim, - N: Scalar + Copy + $bound, + N: Scalar + Clone + $bound, SA: Storage, SB: Storage, DefaultAllocator: SameShapeAllocator, @@ -341,7 +341,7 @@ macro_rules! componentwise_binop_impl( impl<'b, N, R1, C1, R2, C2, SA, SB> $TraitAssign<&'b Matrix> for Matrix where R1: Dim, C1: Dim, R2: Dim, C2: Dim, - N: Scalar + Copy + $bound, + N: Scalar + Clone + $bound, SA: StorageMut, SB: Storage, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns { @@ -354,7 +354,7 @@ macro_rules! 
componentwise_binop_impl( impl $TraitAssign> for Matrix where R1: Dim, C1: Dim, R2: Dim, C2: Dim, - N: Scalar + Copy + $bound, + N: Scalar + Clone + $bound, SA: StorageMut, SB: Storage, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns { @@ -376,7 +376,7 @@ componentwise_binop_impl!(Sub, sub, ClosedSub; impl iter::Sum for MatrixMN where - N: Scalar + Copy + ClosedAdd + Zero, + N: Scalar + Clone + ClosedAdd + Zero, DefaultAllocator: Allocator, { fn sum>>(iter: I) -> MatrixMN { @@ -386,7 +386,7 @@ where impl iter::Sum for MatrixMN where - N: Scalar + Copy + ClosedAdd + Zero, + N: Scalar + Clone + ClosedAdd + Zero, DefaultAllocator: Allocator, { /// # Example @@ -416,7 +416,7 @@ where impl<'a, N, R: DimName, C: DimName> iter::Sum<&'a MatrixMN> for MatrixMN where - N: Scalar + Copy + ClosedAdd + Zero, + N: Scalar + Clone + ClosedAdd + Zero, DefaultAllocator: Allocator, { fn sum>>(iter: I) -> MatrixMN { @@ -426,7 +426,7 @@ where impl<'a, N, C: Dim> iter::Sum<&'a MatrixMN> for MatrixMN where - N: Scalar + Copy + ClosedAdd + Zero, + N: Scalar + Clone + ClosedAdd + Zero, DefaultAllocator: Allocator, { /// # Example @@ -466,7 +466,7 @@ macro_rules! componentwise_scalarop_impl( ($Trait: ident, $method: ident, $bound: ident; $TraitAssign: ident, $method_assign: ident) => { impl $Trait for Matrix - where N: Scalar + Copy + $bound, + where N: Scalar + Clone + $bound, S: Storage, DefaultAllocator: Allocator { type Output = MatrixMN; @@ -482,7 +482,7 @@ macro_rules! componentwise_scalarop_impl( // for left in res.iter_mut() { for left in res.as_mut_slice().iter_mut() { - *left = left.$method(rhs) + *left = left.inlined_clone().$method(rhs.inlined_clone()) } res @@ -490,7 +490,7 @@ macro_rules! componentwise_scalarop_impl( } impl<'a, N, R: Dim, C: Dim, S> $Trait for &'a Matrix - where N: Scalar + Copy + $bound, + where N: Scalar + Clone + $bound, S: Storage, DefaultAllocator: Allocator { type Output = MatrixMN; @@ -502,13 +502,13 @@ macro_rules! 
componentwise_scalarop_impl( } impl $TraitAssign for Matrix - where N: Scalar + Copy + $bound, + where N: Scalar + Clone + $bound, S: StorageMut { #[inline] fn $method_assign(&mut self, rhs: N) { for j in 0 .. self.ncols() { for i in 0 .. self.nrows() { - unsafe { self.get_unchecked_mut((i, j)).$method_assign(rhs) }; + unsafe { self.get_unchecked_mut((i, j)).$method_assign(rhs.inlined_clone()) }; } } } @@ -561,7 +561,7 @@ left_scalar_mul_impl!(u8, u16, u32, u64, usize, i8, i16, i32, i64, isize, f32, f impl<'a, 'b, N, R1: Dim, C1: Dim, R2: Dim, C2: Dim, SA, SB> Mul<&'b Matrix> for &'a Matrix where - N: Scalar + Copy + Zero + One + ClosedAdd + ClosedMul, + N: Scalar + Clone + Zero + One + ClosedAdd + ClosedMul, SA: Storage, SB: Storage, DefaultAllocator: Allocator, @@ -582,7 +582,7 @@ where impl<'a, N, R1: Dim, C1: Dim, R2: Dim, C2: Dim, SA, SB> Mul> for &'a Matrix where - N: Scalar + Copy + Zero + One + ClosedAdd + ClosedMul, + N: Scalar + Clone + Zero + One + ClosedAdd + ClosedMul, SB: Storage, SA: Storage, DefaultAllocator: Allocator, @@ -599,7 +599,7 @@ where impl<'b, N, R1: Dim, C1: Dim, R2: Dim, C2: Dim, SA, SB> Mul<&'b Matrix> for Matrix where - N: Scalar + Copy + Zero + One + ClosedAdd + ClosedMul, + N: Scalar + Clone + Zero + One + ClosedAdd + ClosedMul, SB: Storage, SA: Storage, DefaultAllocator: Allocator, @@ -616,7 +616,7 @@ where impl Mul> for Matrix where - N: Scalar + Copy + Zero + One + ClosedAdd + ClosedMul, + N: Scalar + Clone + Zero + One + ClosedAdd + ClosedMul, SB: Storage, SA: Storage, DefaultAllocator: Allocator, @@ -638,7 +638,7 @@ where R1: Dim, C1: Dim, R2: Dim, - N: Scalar + Copy + Zero + One + ClosedAdd + ClosedMul, + N: Scalar + Clone + Zero + One + ClosedAdd + ClosedMul, SB: Storage, SA: ContiguousStorageMut + Clone, ShapeConstraint: AreMultipliable, @@ -655,7 +655,7 @@ where R1: Dim, C1: Dim, R2: Dim, - N: Scalar + Copy + Zero + One + ClosedAdd + ClosedMul, + N: Scalar + Clone + Zero + One + ClosedAdd + ClosedMul, SB: Storage, SA: 
ContiguousStorageMut + Clone, ShapeConstraint: AreMultipliable, @@ -671,7 +671,7 @@ where // Transpose-multiplication. impl Matrix where - N: Scalar + Copy + Zero + One + ClosedAdd + ClosedMul, + N: Scalar + Clone + Zero + One + ClosedAdd + ClosedMul, SA: Storage, { /// Equivalent to `self.transpose() * rhs`. @@ -810,10 +810,10 @@ where for j2 in 0..ncols2.value() { for i1 in 0..nrows1.value() { unsafe { - let coeff = *self.get_unchecked((i1, j1)); + let coeff = self.get_unchecked((i1, j1)).inlined_clone(); for i2 in 0..nrows2.value() { - *data_res = coeff * *rhs.get_unchecked((i2, j2)); + *data_res = coeff.inlined_clone() * rhs.get_unchecked((i2, j2)).inlined_clone(); data_res = data_res.offset(1); } } @@ -826,7 +826,7 @@ where } } -impl> Matrix { +impl> Matrix { /// Adds a scalar to `self`. #[inline] pub fn add_scalar(&self, rhs: N) -> MatrixMN @@ -841,14 +841,14 @@ impl> Matrix { for e in self.iter_mut() { - *e += rhs + *e += rhs.inlined_clone() } } } impl iter::Product for MatrixN where - N: Scalar + Copy + Zero + One + ClosedMul + ClosedAdd, + N: Scalar + Clone + Zero + One + ClosedMul + ClosedAdd, DefaultAllocator: Allocator, { fn product>>(iter: I) -> MatrixN { @@ -858,7 +858,7 @@ where impl<'a, N, D: DimName> iter::Product<&'a MatrixN> for MatrixN where - N: Scalar + Copy + Zero + One + ClosedMul + ClosedAdd, + N: Scalar + Clone + Zero + One + ClosedMul + ClosedAdd, DefaultAllocator: Allocator, { fn product>>(iter: I) -> MatrixN { @@ -866,7 +866,7 @@ where } } -impl> Matrix { +impl> Matrix { #[inline(always)] fn xcmp(&self, abs: impl Fn(N) -> N2, ordering: Ordering) -> N2 where N2: Scalar + PartialOrd + Zero { @@ -874,7 +874,7 @@ impl> Matrix { let mut max = iter.next().cloned().map_or(N2::zero(), &abs); for e in iter { - let ae = abs(*e); + let ae = abs(e.inlined_clone()); if ae.partial_cmp(&max) == Some(ordering) { max = ae; @@ -967,4 +967,4 @@ impl> Matrix { where N: PartialOrd + Zero { self.xcmp(|e| e, Ordering::Less) } -} \ No newline at end of file +} 
diff --git a/src/base/properties.rs b/src/base/properties.rs index 020e38d7..74ddf8cf 100644 --- a/src/base/properties.rs +++ b/src/base/properties.rs @@ -9,7 +9,7 @@ use crate::base::dimension::{Dim, DimMin}; use crate::base::storage::Storage; use crate::base::{DefaultAllocator, Matrix, Scalar, SquareMatrix}; -impl> Matrix { +impl> Matrix { /// Indicates if this is an empty matrix. #[inline] pub fn is_empty(&self) -> bool { diff --git a/src/base/scalar.rs b/src/base/scalar.rs index a6c837ff..070dc0a7 100644 --- a/src/base/scalar.rs +++ b/src/base/scalar.rs @@ -13,5 +13,17 @@ pub trait Scalar: PartialEq + Debug + Any { fn is() -> bool { TypeId::of::() == TypeId::of::() } + + #[inline(always)] + /// Performance hack: Clone doesn't get inlined for Copy types in debug mode, so make it inline anyway. + /// + /// Downstream crates need to implement this on any Clone Scalars, as a blanket impl would conflict with with the blanket Copy impl. + fn inlined_clone(&self) -> Self; +} + +impl Scalar for T { + #[inline(always)] + fn inlined_clone(&self) -> T { + *self + } } -impl Scalar for T {} diff --git a/src/base/statistics.rs b/src/base/statistics.rs index d71697d7..8a355c87 100644 --- a/src/base/statistics.rs +++ b/src/base/statistics.rs @@ -3,7 +3,7 @@ use alga::general::{Field, SupersetOf}; use crate::storage::Storage; use crate::allocator::Allocator; -impl> Matrix { +impl> Matrix { /// Returns a row vector where each element is the result of the application of `f` on the /// corresponding column of the original matrix. #[inline] @@ -54,7 +54,7 @@ impl> Matrix { } } -impl, R: Dim, C: Dim, S: Storage> Matrix { +impl, R: Dim, C: Dim, S: Storage> Matrix { /* * * Sum computation. 
@@ -154,9 +154,10 @@ impl, R: Dim, C: Dim, S: Storage(self.len() as f64); - val.0 * denom - (val.1 * denom) * (val.1 * denom) + let vd = val.1 * denom.inlined_clone(); + val.0 * denom - vd.inlined_clone() * vd } } @@ -213,14 +214,14 @@ impl, R: Dim, C: Dim, S: Storage(ncols.value() as f64); self.compress_columns(mean, |out, col| { for i in 0..nrows.value() { unsafe { let val = col.vget_unchecked(i); - *out.vget_unchecked_mut(i) += denom * *val * *val + *out.vget_unchecked_mut(i) += denom.inlined_clone() * val.inlined_clone() * val.inlined_clone() } } }) @@ -304,7 +305,7 @@ impl, R: Dim, C: Dim, S: Storage(ncols.value() as f64); self.compress_columns(VectorN::zeros_generic(nrows, U1), |out, col| { - out.axpy(denom, &col, N::one()) + out.axpy(denom.inlined_clone(), &col, N::one()) }) } } diff --git a/src/base/storage.rs b/src/base/storage.rs index cbd27527..f1b0177b 100644 --- a/src/base/storage.rs +++ b/src/base/storage.rs @@ -36,7 +36,7 @@ pub type CStride = /// should **not** allow the user to modify the size of the underlying buffer with safe methods /// (for example the `VecStorage::data_mut` method is unsafe because the user could change the /// vector's size so that it no longer contains enough elements: this will lead to UB. -pub unsafe trait Storage: Debug + Sized { +pub unsafe trait Storage: Debug + Sized { /// The static stride of this storage's rows. type RStride: Dim; @@ -117,7 +117,7 @@ pub unsafe trait Storage: Debug + Sized { /// Note that a mutable access does not mean that the matrix owns its data. For example, a mutable /// matrix slice can provide mutable access to its elements even if it does not own its data (it /// contains only an internal reference to them). -pub unsafe trait StorageMut: Storage { +pub unsafe trait StorageMut: Storage { /// The matrix mutable data pointer. 
fn ptr_mut(&mut self) -> *mut N; @@ -175,7 +175,7 @@ pub unsafe trait StorageMut: Storage: +pub unsafe trait ContiguousStorage: Storage { } @@ -185,7 +185,7 @@ pub unsafe trait ContiguousStorage: /// The storage requirement means that for any value of `i` in `[0, nrows * ncols[`, the value /// `.get_unchecked_linear` returns one of the matrix component. This trait is unsafe because /// failing to comply to this may cause Undefined Behaviors. -pub unsafe trait ContiguousStorageMut: +pub unsafe trait ContiguousStorageMut: ContiguousStorage + StorageMut { } diff --git a/src/base/swizzle.rs b/src/base/swizzle.rs index 4508c758..ec33f8da 100644 --- a/src/base/swizzle.rs +++ b/src/base/swizzle.rs @@ -5,14 +5,14 @@ use typenum::{self, Cmp, Greater}; macro_rules! impl_swizzle { ($( where $BaseDim: ident: $( $name: ident() -> $Result: ident[$($i: expr),+] ),+ ;)* ) => { $( - impl> Vector + impl> Vector where D::Value: Cmp { $( /// Builds a new vector from components of `self`. #[inline] pub fn $name(&self) -> $Result { - $Result::new($(self[$i]),*) + $Result::new($(self[$i].inlined_clone()),*) } )* } diff --git a/src/base/vec_storage.rs b/src/base/vec_storage.rs index a0230488..ffb1ac0c 100644 --- a/src/base/vec_storage.rs +++ b/src/base/vec_storage.rs @@ -102,7 +102,7 @@ impl Into> for VecStorage * Dynamic − Dynamic * */ -unsafe impl Storage for VecStorage +unsafe impl Storage for VecStorage where DefaultAllocator: Allocator { type RStride = U1; @@ -146,7 +146,7 @@ where DefaultAllocator: Allocator } } -unsafe impl Storage for VecStorage +unsafe impl Storage for VecStorage where DefaultAllocator: Allocator { type RStride = U1; @@ -195,7 +195,7 @@ where DefaultAllocator: Allocator * StorageMut, ContiguousStorage. 
* */ -unsafe impl StorageMut for VecStorage +unsafe impl StorageMut for VecStorage where DefaultAllocator: Allocator { #[inline] @@ -209,13 +209,13 @@ where DefaultAllocator: Allocator } } -unsafe impl ContiguousStorage for VecStorage where DefaultAllocator: Allocator +unsafe impl ContiguousStorage for VecStorage where DefaultAllocator: Allocator {} -unsafe impl ContiguousStorageMut for VecStorage where DefaultAllocator: Allocator +unsafe impl ContiguousStorageMut for VecStorage where DefaultAllocator: Allocator {} -unsafe impl StorageMut for VecStorage +unsafe impl StorageMut for VecStorage where DefaultAllocator: Allocator { #[inline] @@ -244,10 +244,10 @@ impl Abomonation for VecStorage { } } -unsafe impl ContiguousStorage for VecStorage where DefaultAllocator: Allocator +unsafe impl ContiguousStorage for VecStorage where DefaultAllocator: Allocator {} -unsafe impl ContiguousStorageMut for VecStorage where DefaultAllocator: Allocator +unsafe impl ContiguousStorageMut for VecStorage where DefaultAllocator: Allocator {} impl Extend for VecStorage @@ -270,7 +270,7 @@ impl Extend for VecStorage impl Extend> for VecStorage where - N: Scalar + Copy, + N: Scalar + Clone, R: Dim, RV: Dim, SV: Storage, @@ -291,7 +291,7 @@ where self.data.reserve(nrows * lower); for vector in iter { assert_eq!(nrows, vector.shape().0); - self.data.extend(vector.iter()); + self.data.extend(vector.iter().cloned()); } self.ncols = Dynamic::new(self.data.len() / nrows); } diff --git a/src/debug/random_orthogonal.rs b/src/debug/random_orthogonal.rs index 8a3e6486..8b9321f2 100644 --- a/src/debug/random_orthogonal.rs +++ b/src/debug/random_orthogonal.rs @@ -12,7 +12,7 @@ use crate::linalg::givens::GivensRotation; /// A random orthogonal matrix. 
#[derive(Clone, Debug)] -pub struct RandomOrthogonal +pub struct RandomOrthogonal where DefaultAllocator: Allocator { m: MatrixN, diff --git a/src/debug/random_sdp.rs b/src/debug/random_sdp.rs index 5875faf6..503d8689 100644 --- a/src/debug/random_sdp.rs +++ b/src/debug/random_sdp.rs @@ -13,7 +13,7 @@ use crate::debug::RandomOrthogonal; /// A random, well-conditioned, symmetric definite-positive matrix. #[derive(Clone, Debug)] -pub struct RandomSDP +pub struct RandomSDP where DefaultAllocator: Allocator { m: MatrixN, diff --git a/src/geometry/op_macros.rs b/src/geometry/op_macros.rs index 7ee81f07..2b12a8c9 100644 --- a/src/geometry/op_macros.rs +++ b/src/geometry/op_macros.rs @@ -18,7 +18,7 @@ macro_rules! md_impl( // Lifetime. $($lives: tt),*) => { impl<$($lives ,)* N $(, $Dims: $DimsBound $(<$($BoundParam),*>)*)*> $Op<$Rhs> for $Lhs - where N: Scalar + Copy + Zero + One + ClosedAdd + ClosedMul $($(+ $ScalarBounds)*)*, + where N: Scalar + Clone + Zero + One + ClosedAdd + ClosedMul $($(+ $ScalarBounds)*)*, DefaultAllocator: Allocator + Allocator + Allocator, @@ -96,7 +96,7 @@ macro_rules! md_assign_impl( // Actual implementation and lifetimes. $action: expr; $($lives: tt),*) => { impl<$($lives ,)* N $(, $Dims: $DimsBound $(<$($BoundParam),*>)*)*> $Op<$Rhs> for $Lhs - where N: Scalar + Copy + Zero + One + ClosedAdd + ClosedMul $($(+ $ScalarBounds)*)*, + where N: Scalar + Clone + Zero + One + ClosedAdd + ClosedMul $($(+ $ScalarBounds)*)*, DefaultAllocator: Allocator + Allocator, $( $ConstraintType: $ConstraintBound $(<$( $ConstraintBoundParams $( = $EqBound )*),*>)* ),* @@ -148,7 +148,7 @@ macro_rules! 
add_sub_impl( $lhs: ident: $Lhs: ty, $rhs: ident: $Rhs: ty, Output = $Result: ty; $action: expr; $($lives: tt),*) => { impl<$($lives ,)* N $(, $Dims: $DimsBound $(<$($BoundParam),*>)*)*> $Op<$Rhs> for $Lhs - where N: Scalar + Copy + $bound, + where N: Scalar + Clone + $bound, DefaultAllocator: Allocator + Allocator + SameShapeAllocator, @@ -172,7 +172,7 @@ macro_rules! add_sub_assign_impl( $lhs: ident: $Lhs: ty, $rhs: ident: $Rhs: ty; $action: expr; $($lives: tt),*) => { impl<$($lives ,)* N $(, $Dims: $DimsBound)*> $Op<$Rhs> for $Lhs - where N: Scalar + Copy + $bound, + where N: Scalar + Clone + $bound, DefaultAllocator: Allocator + Allocator, ShapeConstraint: SameNumberOfRows<$R1, $R2> + SameNumberOfColumns<$C1, $C2> { diff --git a/src/geometry/perspective.rs b/src/geometry/perspective.rs index 923da505..ec4575fa 100644 --- a/src/geometry/perspective.rs +++ b/src/geometry/perspective.rs @@ -18,7 +18,7 @@ use crate::base::{Matrix4, Scalar, Vector, Vector3}; use crate::geometry::{Point3, Projective3}; /// A 3D perspective projection stored as an homogeneous 4x4 matrix. -pub struct Perspective3 { +pub struct Perspective3 { matrix: Matrix4, } diff --git a/src/geometry/point.rs b/src/geometry/point.rs index 48031ba9..6911db78 100644 --- a/src/geometry/point.rs +++ b/src/geometry/point.rs @@ -20,14 +20,14 @@ use crate::base::{DefaultAllocator, Scalar, VectorN}; /// A point in a n-dimensional euclidean space. #[repr(C)] #[derive(Debug, Clone)] -pub struct Point +pub struct Point where DefaultAllocator: Allocator { /// The coordinates of this point, i.e., the shift from the origin. 
pub coords: VectorN, } -impl hash::Hash for Point +impl hash::Hash for Point where DefaultAllocator: Allocator, >::Buffer: hash::Hash, @@ -45,7 +45,7 @@ where } #[cfg(feature = "serde-serialize")] -impl Serialize for Point +impl Serialize for Point where DefaultAllocator: Allocator, >::Buffer: Serialize, @@ -57,7 +57,7 @@ where } #[cfg(feature = "serde-serialize")] -impl<'a, N: Scalar + Copy, D: DimName> Deserialize<'a> for Point +impl<'a, N: Scalar + Clone, D: DimName> Deserialize<'a> for Point where DefaultAllocator: Allocator, >::Buffer: Deserialize<'a>, @@ -73,7 +73,7 @@ where #[cfg(feature = "abomonation-serialize")] impl Abomonation for Point where - N: Scalar + Copy, + N: Scalar + Clone, D: DimName, VectorN: Abomonation, DefaultAllocator: Allocator, @@ -91,7 +91,7 @@ where } } -impl Point +impl Point where DefaultAllocator: Allocator { /// Converts this point into a vector in homogeneous coordinates, i.e., appends a `1` at the @@ -210,7 +210,7 @@ where DefaultAllocator: Allocator } } -impl AbsDiffEq for Point +impl AbsDiffEq for Point where DefaultAllocator: Allocator, N::Epsilon: Copy, @@ -228,7 +228,7 @@ where } } -impl RelativeEq for Point +impl RelativeEq for Point where DefaultAllocator: Allocator, N::Epsilon: Copy, @@ -251,7 +251,7 @@ where } } -impl UlpsEq for Point +impl UlpsEq for Point where DefaultAllocator: Allocator, N::Epsilon: Copy, @@ -267,9 +267,9 @@ where } } -impl Eq for Point where DefaultAllocator: Allocator {} +impl Eq for Point where DefaultAllocator: Allocator {} -impl PartialEq for Point +impl PartialEq for Point where DefaultAllocator: Allocator { #[inline] @@ -278,7 +278,7 @@ where DefaultAllocator: Allocator } } -impl PartialOrd for Point +impl PartialOrd for Point where DefaultAllocator: Allocator { #[inline] @@ -312,7 +312,7 @@ where DefaultAllocator: Allocator * Display * */ -impl fmt::Display for Point +impl fmt::Display for Point where DefaultAllocator: Allocator { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { diff 
--git a/src/geometry/point_alga.rs b/src/geometry/point_alga.rs index 0deb52d3..b5ae46b8 100644 --- a/src/geometry/point_alga.rs +++ b/src/geometry/point_alga.rs @@ -7,9 +7,9 @@ use crate::base::{DefaultAllocator, Scalar, VectorN}; use crate::geometry::Point; -impl AffineSpace for Point +impl AffineSpace for Point where - N: Scalar + Copy + Field, + N: Scalar + Clone + Field, DefaultAllocator: Allocator, { type Translation = VectorN; @@ -49,7 +49,7 @@ where DefaultAllocator: Allocator */ impl MeetSemilattice for Point where - N: Scalar + Copy + MeetSemilattice, + N: Scalar + Clone + MeetSemilattice, DefaultAllocator: Allocator, { #[inline] @@ -60,7 +60,7 @@ where impl JoinSemilattice for Point where - N: Scalar + Copy + JoinSemilattice, + N: Scalar + Clone + JoinSemilattice, DefaultAllocator: Allocator, { #[inline] @@ -71,7 +71,7 @@ where impl Lattice for Point where - N: Scalar + Copy + Lattice, + N: Scalar + Clone + Lattice, DefaultAllocator: Allocator, { #[inline] diff --git a/src/geometry/point_construction.rs b/src/geometry/point_construction.rs index 530c18a8..5ba9bc2e 100644 --- a/src/geometry/point_construction.rs +++ b/src/geometry/point_construction.rs @@ -12,7 +12,7 @@ use crate::base::{DefaultAllocator, Scalar, VectorN}; use crate::geometry::Point; -impl Point +impl Point where DefaultAllocator: Allocator { /// Creates a new point with uninitialized coordinates. @@ -94,12 +94,12 @@ where DefaultAllocator: Allocator #[inline] pub fn from_homogeneous(v: VectorN>) -> Option where - N: Scalar + Copy + Zero + One + ClosedDiv, + N: Scalar + Clone + Zero + One + ClosedDiv, D: DimNameAdd, DefaultAllocator: Allocator>, { if !v[D::dim()].is_zero() { - let coords = v.fixed_slice::(0, 0) / v[D::dim()]; + let coords = v.fixed_slice::(0, 0) / v[D::dim()].inlined_clone(); Some(Self::from(coords)) } else { None @@ -112,7 +112,7 @@ where DefaultAllocator: Allocator * Traits that build points. 
* */ -impl Bounded for Point +impl Bounded for Point where DefaultAllocator: Allocator { #[inline] @@ -126,7 +126,7 @@ where DefaultAllocator: Allocator } } -impl Distribution> for Standard +impl Distribution> for Standard where DefaultAllocator: Allocator, Standard: Distribution, @@ -138,7 +138,7 @@ where } #[cfg(feature = "arbitrary")] -impl Arbitrary for Point +impl Arbitrary for Point where DefaultAllocator: Allocator, >::Buffer: Send, @@ -156,7 +156,7 @@ where */ macro_rules! componentwise_constructors_impl( ($($doc: expr; $D: ty, $($args: ident:$irow: expr),*);* $(;)*) => {$( - impl Point + impl Point where DefaultAllocator: Allocator { #[doc = "Initializes this point from its components."] #[doc = "# Example\n```"] @@ -192,7 +192,7 @@ componentwise_constructors_impl!( macro_rules! from_array_impl( ($($D: ty, $len: expr);*) => {$( - impl From<[N; $len]> for Point { + impl From<[N; $len]> for Point { fn from (coords: [N; $len]) -> Self { Self { coords: coords.into() diff --git a/src/geometry/point_conversion.rs b/src/geometry/point_conversion.rs index b3131f52..d2c5a8c0 100644 --- a/src/geometry/point_conversion.rs +++ b/src/geometry/point_conversion.rs @@ -27,8 +27,8 @@ use std::convert::{AsMut, AsRef, From, Into}; impl SubsetOf> for Point where D: DimName, - N1: Scalar + Copy, - N2: Scalar + Copy + SupersetOf, + N1: Scalar + Clone, + N2: Scalar + Clone + SupersetOf, DefaultAllocator: Allocator + Allocator, { #[inline] @@ -52,8 +52,8 @@ where impl SubsetOf>> for Point where D: DimNameAdd, - N1: Scalar + Copy, - N2: Scalar + Copy + Zero + One + ClosedDiv + SupersetOf, + N1: Scalar + Clone, + N2: Scalar + Clone + Zero + One + ClosedDiv + SupersetOf, DefaultAllocator: Allocator + Allocator> + Allocator> @@ -72,7 +72,7 @@ where #[inline] unsafe fn from_superset_unchecked(v: &VectorN>) -> Self { - let coords = v.fixed_slice::(0, 0) / v[D::dim()]; + let coords = v.fixed_slice::(0, 0) / v[D::dim()].inlined_clone(); Self { coords: crate::convert_unchecked(coords) } 
@@ -83,7 +83,7 @@ where macro_rules! impl_from_into_mint_1D( ($($NRows: ident => $PT:ident, $VT:ident [$SZ: expr]);* $(;)*) => {$( impl From> for Point - where N: Scalar + Copy { + where N: Scalar + Clone { #[inline] fn from(p: mint::$PT) -> Self { Self { @@ -93,7 +93,7 @@ macro_rules! impl_from_into_mint_1D( } impl Into> for Point - where N: Scalar + Copy { + where N: Scalar + Clone { #[inline] fn into(self) -> mint::$PT { let mint_vec: mint::$VT = self.coords.into(); @@ -102,7 +102,7 @@ macro_rules! impl_from_into_mint_1D( } impl AsRef> for Point - where N: Scalar + Copy { + where N: Scalar + Clone { #[inline] fn as_ref(&self) -> &mint::$PT { unsafe { @@ -112,7 +112,7 @@ macro_rules! impl_from_into_mint_1D( } impl AsMut> for Point - where N: Scalar + Copy { + where N: Scalar + Clone { #[inline] fn as_mut(&mut self) -> &mut mint::$PT { unsafe { @@ -130,7 +130,7 @@ impl_from_into_mint_1D!( U3 => Point3, Vector3[3]; ); -impl From> for VectorN> +impl From> for VectorN> where D: DimNameAdd, DefaultAllocator: Allocator + Allocator>, @@ -141,7 +141,7 @@ where } } -impl From> for Point +impl From> for Point where DefaultAllocator: Allocator, { diff --git a/src/geometry/point_coordinates.rs b/src/geometry/point_coordinates.rs index 1b6edf67..0ccc9441 100644 --- a/src/geometry/point_coordinates.rs +++ b/src/geometry/point_coordinates.rs @@ -16,7 +16,7 @@ use crate::geometry::Point; macro_rules! deref_impl( ($D: ty, $Target: ident $(, $comps: ident)*) => { - impl Deref for Point + impl Deref for Point where DefaultAllocator: Allocator { type Target = $Target; @@ -26,7 +26,7 @@ macro_rules! deref_impl( } } - impl DerefMut for Point + impl DerefMut for Point where DefaultAllocator: Allocator { #[inline] fn deref_mut(&mut self) -> &mut Self::Target { diff --git a/src/geometry/point_ops.rs b/src/geometry/point_ops.rs index 2a4fae03..4f08f526 100644 --- a/src/geometry/point_ops.rs +++ b/src/geometry/point_ops.rs @@ -18,7 +18,7 @@ use crate::geometry::Point; * Indexing. 
* */ -impl Index for Point +impl Index for Point where DefaultAllocator: Allocator { type Output = N; @@ -29,7 +29,7 @@ where DefaultAllocator: Allocator } } -impl IndexMut for Point +impl IndexMut for Point where DefaultAllocator: Allocator { #[inline] @@ -43,7 +43,7 @@ where DefaultAllocator: Allocator * Neg. * */ -impl Neg for Point +impl Neg for Point where DefaultAllocator: Allocator { type Output = Self; @@ -54,7 +54,7 @@ where DefaultAllocator: Allocator } } -impl<'a, N: Scalar + Copy + ClosedNeg, D: DimName> Neg for &'a Point +impl<'a, N: Scalar + Clone + ClosedNeg, D: DimName> Neg for &'a Point where DefaultAllocator: Allocator { type Output = Point; @@ -138,7 +138,7 @@ add_sub_impl!(Add, add, ClosedAdd; macro_rules! op_assign_impl( ($($TraitAssign: ident, $method_assign: ident, $bound: ident);* $(;)*) => {$( impl<'b, N, D1: DimName, D2: Dim, SB> $TraitAssign<&'b Vector> for Point - where N: Scalar + Copy + $bound, + where N: Scalar + Clone + $bound, SB: Storage, DefaultAllocator: Allocator, ShapeConstraint: SameNumberOfRows { @@ -150,7 +150,7 @@ macro_rules! op_assign_impl( } impl $TraitAssign> for Point - where N: Scalar + Copy + $bound, + where N: Scalar + Clone + $bound, SB: Storage, DefaultAllocator: Allocator, ShapeConstraint: SameNumberOfRows { @@ -192,7 +192,7 @@ md_impl_all!( macro_rules! componentwise_scalarop_impl( ($Trait: ident, $method: ident, $bound: ident; $TraitAssign: ident, $method_assign: ident) => { - impl $Trait for Point + impl $Trait for Point where DefaultAllocator: Allocator { type Output = Point; @@ -202,7 +202,7 @@ macro_rules! componentwise_scalarop_impl( } } - impl<'a, N: Scalar + Copy + $bound, D: DimName> $Trait for &'a Point + impl<'a, N: Scalar + Clone + $bound, D: DimName> $Trait for &'a Point where DefaultAllocator: Allocator { type Output = Point; @@ -212,7 +212,7 @@ macro_rules! 
componentwise_scalarop_impl( } } - impl $TraitAssign for Point + impl $TraitAssign for Point where DefaultAllocator: Allocator { #[inline] fn $method_assign(&mut self, right: N) { diff --git a/src/geometry/reflection.rs b/src/geometry/reflection.rs index 9b4da872..585fe57b 100644 --- a/src/geometry/reflection.rs +++ b/src/geometry/reflection.rs @@ -8,7 +8,7 @@ use crate::storage::{Storage, StorageMut}; use crate::geometry::Point; /// A reflection wrt. a plane. -pub struct Reflection> { +pub struct Reflection> { axis: Vector, bias: N, } diff --git a/src/geometry/rotation.rs b/src/geometry/rotation.rs index 0081262b..9ee8511a 100755 --- a/src/geometry/rotation.rs +++ b/src/geometry/rotation.rs @@ -24,13 +24,13 @@ use crate::geometry::Point; /// A rotation matrix. #[repr(C)] #[derive(Debug)] -pub struct Rotation +pub struct Rotation where DefaultAllocator: Allocator { matrix: MatrixN, } -impl hash::Hash for Rotation +impl hash::Hash for Rotation where DefaultAllocator: Allocator, >::Buffer: hash::Hash, @@ -47,7 +47,7 @@ where { } -impl Clone for Rotation +impl Clone for Rotation where DefaultAllocator: Allocator, >::Buffer: Clone, @@ -61,7 +61,7 @@ where #[cfg(feature = "abomonation-serialize")] impl Abomonation for Rotation where - N: Scalar + Copy, + N: Scalar + Clone, D: DimName, MatrixN: Abomonation, DefaultAllocator: Allocator, @@ -80,7 +80,7 @@ where } #[cfg(feature = "serde-serialize")] -impl Serialize for Rotation +impl Serialize for Rotation where DefaultAllocator: Allocator, Owned: Serialize, @@ -92,7 +92,7 @@ where } #[cfg(feature = "serde-serialize")] -impl<'a, N: Scalar + Copy, D: DimName> Deserialize<'a> for Rotation +impl<'a, N: Scalar + Clone, D: DimName> Deserialize<'a> for Rotation where DefaultAllocator: Allocator, Owned: Deserialize<'a>, @@ -105,7 +105,7 @@ where } } -impl Rotation +impl Rotation where DefaultAllocator: Allocator { /// A reference to the underlying matrix representation of this rotation. 
@@ -432,9 +432,9 @@ where DefaultAllocator: Allocator + Allocator } } -impl Eq for Rotation where DefaultAllocator: Allocator {} +impl Eq for Rotation where DefaultAllocator: Allocator {} -impl PartialEq for Rotation +impl PartialEq for Rotation where DefaultAllocator: Allocator { #[inline] @@ -445,7 +445,7 @@ where DefaultAllocator: Allocator impl AbsDiffEq for Rotation where - N: Scalar + Copy + AbsDiffEq, + N: Scalar + Clone + AbsDiffEq, DefaultAllocator: Allocator, N::Epsilon: Copy, { @@ -464,7 +464,7 @@ where impl RelativeEq for Rotation where - N: Scalar + Copy + RelativeEq, + N: Scalar + Clone + RelativeEq, DefaultAllocator: Allocator, N::Epsilon: Copy, { @@ -488,7 +488,7 @@ where impl UlpsEq for Rotation where - N: Scalar + Copy + UlpsEq, + N: Scalar + Clone + UlpsEq, DefaultAllocator: Allocator, N::Epsilon: Copy, { diff --git a/src/geometry/rotation_construction.rs b/src/geometry/rotation_construction.rs index 3e9b2930..514ed439 100644 --- a/src/geometry/rotation_construction.rs +++ b/src/geometry/rotation_construction.rs @@ -10,7 +10,7 @@ use crate::geometry::Rotation; impl Rotation where - N: Scalar + Copy + Zero + One, + N: Scalar + Clone + Zero + One, DefaultAllocator: Allocator, { /// Creates a new square identity rotation of the given `dimension`. 
@@ -32,7 +32,7 @@ where impl One for Rotation where - N: Scalar + Copy + Zero + One + ClosedAdd + ClosedMul, + N: Scalar + Clone + Zero + One + ClosedAdd + ClosedMul, DefaultAllocator: Allocator, { #[inline] diff --git a/src/geometry/rotation_ops.rs b/src/geometry/rotation_ops.rs index 8b0810a1..553d8c62 100644 --- a/src/geometry/rotation_ops.rs +++ b/src/geometry/rotation_ops.rs @@ -30,7 +30,7 @@ use crate::base::{DefaultAllocator, Matrix, MatrixMN, Scalar, Unit, Vector, Vect use crate::geometry::{Point, Rotation}; -impl Index<(usize, usize)> for Rotation +impl Index<(usize, usize)> for Rotation where DefaultAllocator: Allocator { type Output = N; diff --git a/src/geometry/swizzle.rs b/src/geometry/swizzle.rs index fdcb4743..149bcf02 100644 --- a/src/geometry/swizzle.rs +++ b/src/geometry/swizzle.rs @@ -6,7 +6,7 @@ use typenum::{self, Cmp, Greater}; macro_rules! impl_swizzle { ($( where $BaseDim: ident: $( $name: ident() -> $Result: ident[$($i: expr),+] ),+ ;)* ) => { $( - impl Point + impl Point where DefaultAllocator: Allocator, D::Value: Cmp @@ -15,7 +15,7 @@ macro_rules! impl_swizzle { /// Builds a new point from components of `self`. #[inline] pub fn $name(&self) -> $Result { - $Result::new($(self[$i]),*) + $Result::new($(self[$i].inlined_clone()),*) } )* } diff --git a/src/geometry/translation.rs b/src/geometry/translation.rs index 0e9b37fc..1540da09 100755 --- a/src/geometry/translation.rs +++ b/src/geometry/translation.rs @@ -23,7 +23,7 @@ use crate::geometry::Point; /// A translation. 
#[repr(C)] #[derive(Debug)] -pub struct Translation +pub struct Translation where DefaultAllocator: Allocator { /// The translation coordinates, i.e., how much is added to a point's coordinates when it is @@ -31,7 +31,7 @@ where DefaultAllocator: Allocator pub vector: VectorN, } -impl hash::Hash for Translation +impl hash::Hash for Translation where DefaultAllocator: Allocator, Owned: hash::Hash, @@ -47,7 +47,7 @@ where Owned: Copy, {} -impl Clone for Translation +impl Clone for Translation where DefaultAllocator: Allocator, Owned: Clone, @@ -61,7 +61,7 @@ where #[cfg(feature = "abomonation-serialize")] impl Abomonation for Translation where - N: Scalar + Copy, + N: Scalar + Clone, D: DimName, VectorN: Abomonation, DefaultAllocator: Allocator, @@ -80,7 +80,7 @@ where } #[cfg(feature = "serde-serialize")] -impl Serialize for Translation +impl Serialize for Translation where DefaultAllocator: Allocator, Owned: Serialize, @@ -92,7 +92,7 @@ where } #[cfg(feature = "serde-serialize")] -impl<'a, N: Scalar + Copy, D: DimName> Deserialize<'a> for Translation +impl<'a, N: Scalar + Clone, D: DimName> Deserialize<'a> for Translation where DefaultAllocator: Allocator, Owned: Deserialize<'a>, @@ -105,7 +105,7 @@ where } } -impl Translation +impl Translation where DefaultAllocator: Allocator { /// Creates a new translation from the given vector. @@ -192,7 +192,7 @@ where DefaultAllocator: Allocator } } -impl Translation +impl Translation where DefaultAllocator: Allocator { /// Translate the given point. @@ -211,7 +211,7 @@ where DefaultAllocator: Allocator } } -impl Translation +impl Translation where DefaultAllocator: Allocator { /// Translate the given point by the inverse of this translation. 
@@ -228,9 +228,9 @@ where DefaultAllocator: Allocator } } -impl Eq for Translation where DefaultAllocator: Allocator {} +impl Eq for Translation where DefaultAllocator: Allocator {} -impl PartialEq for Translation +impl PartialEq for Translation where DefaultAllocator: Allocator { #[inline] @@ -239,7 +239,7 @@ where DefaultAllocator: Allocator } } -impl AbsDiffEq for Translation +impl AbsDiffEq for Translation where DefaultAllocator: Allocator, N::Epsilon: Copy, @@ -257,7 +257,7 @@ where } } -impl RelativeEq for Translation +impl RelativeEq for Translation where DefaultAllocator: Allocator, N::Epsilon: Copy, @@ -280,7 +280,7 @@ where } } -impl UlpsEq for Translation +impl UlpsEq for Translation where DefaultAllocator: Allocator, N::Epsilon: Copy, diff --git a/src/geometry/translation_construction.rs b/src/geometry/translation_construction.rs index 370fbf63..7ac3c5fc 100644 --- a/src/geometry/translation_construction.rs +++ b/src/geometry/translation_construction.rs @@ -15,7 +15,7 @@ use crate::base::{DefaultAllocator, Scalar, VectorN}; use crate::geometry::Translation; -impl Translation +impl Translation where DefaultAllocator: Allocator { /// Creates a new identity translation. @@ -38,7 +38,7 @@ where DefaultAllocator: Allocator } } -impl One for Translation +impl One for Translation where DefaultAllocator: Allocator { #[inline] @@ -47,7 +47,7 @@ where DefaultAllocator: Allocator } } -impl Distribution> for Standard +impl Distribution> for Standard where DefaultAllocator: Allocator, Standard: Distribution, @@ -59,7 +59,7 @@ where } #[cfg(feature = "arbitrary")] -impl Arbitrary for Translation +impl Arbitrary for Translation where DefaultAllocator: Allocator, Owned: Send, @@ -78,7 +78,7 @@ where */ macro_rules! 
componentwise_constructors_impl( ($($doc: expr; $D: ty, $($args: ident:$irow: expr),*);* $(;)*) => {$( - impl Translation + impl Translation where DefaultAllocator: Allocator { #[doc = "Initializes this translation from its components."] #[doc = "# Example\n```"] diff --git a/src/geometry/translation_conversion.rs b/src/geometry/translation_conversion.rs index 2a6d9535..ee2f0e6d 100644 --- a/src/geometry/translation_conversion.rs +++ b/src/geometry/translation_conversion.rs @@ -22,8 +22,8 @@ use crate::geometry::{Isometry, Point, Similarity, SuperTCategoryOf, TAffine, Tr impl SubsetOf> for Translation where - N1: Scalar + Copy, - N2: Scalar + Copy + SupersetOf, + N1: Scalar + Clone, + N2: Scalar + Clone + SupersetOf, DefaultAllocator: Allocator + Allocator, { #[inline] @@ -153,7 +153,7 @@ where } } -impl From> for MatrixN> +impl From> for MatrixN> where D: DimNameAdd, DefaultAllocator: Allocator + Allocator, DimNameSum>, @@ -164,7 +164,7 @@ where } } -impl From> for Translation +impl From> for Translation where DefaultAllocator: Allocator { #[inline] diff --git a/src/geometry/translation_coordinates.rs b/src/geometry/translation_coordinates.rs index 97eb5b32..10e5926f 100644 --- a/src/geometry/translation_coordinates.rs +++ b/src/geometry/translation_coordinates.rs @@ -16,7 +16,7 @@ use crate::geometry::Translation; macro_rules! deref_impl( ($D: ty, $Target: ident $(, $comps: ident)*) => { - impl Deref for Translation + impl Deref for Translation where DefaultAllocator: Allocator { type Target = $Target; @@ -26,7 +26,7 @@ macro_rules! deref_impl( } } - impl DerefMut for Translation + impl DerefMut for Translation where DefaultAllocator: Allocator { #[inline] fn deref_mut(&mut self) -> &mut Self::Target { diff --git a/src/linalg/lu.rs b/src/linalg/lu.rs index 9676cd83..42cea3ad 100644 --- a/src/linalg/lu.rs +++ b/src/linalg/lu.rs @@ -318,7 +318,7 @@ where DefaultAllocator: Allocator + Allocator<(usize, usize), D> /// element `matrix[(i, i)]` is provided as argument. 
pub fn gauss_step(matrix: &mut Matrix, diag: N, i: usize) where - N: Scalar + Copy + Field, + N: Scalar + Clone + Field, S: StorageMut, { let mut submat = matrix.slice_range_mut(i.., i..); @@ -333,7 +333,7 @@ where let (pivot_row, mut down) = submat.rows_range_pair_mut(0, 1..); for k in 0..pivot_row.ncols() { - down.column_mut(k).axpy(-pivot_row[k], &coeffs, N::one()); + down.column_mut(k).axpy(-pivot_row[k].inlined_clone(), &coeffs, N::one()); } } @@ -346,7 +346,7 @@ pub fn gauss_step_swap( i: usize, piv: usize, ) where - N: Scalar + Copy + Field, + N: Scalar + Clone + Field, S: StorageMut, { let piv = piv - i; @@ -364,7 +364,7 @@ pub fn gauss_step_swap( for k in 0..pivot_row.ncols() { mem::swap(&mut pivot_row[k], &mut down[(piv - 1, k)]); - down.column_mut(k).axpy(-pivot_row[k], &coeffs, N::one()); + down.column_mut(k).axpy(-pivot_row[k].inlined_clone(), &coeffs, N::one()); } } diff --git a/src/linalg/permutation_sequence.rs b/src/linalg/permutation_sequence.rs index c7fb9b2b..a7d2e8aa 100644 --- a/src/linalg/permutation_sequence.rs +++ b/src/linalg/permutation_sequence.rs @@ -92,7 +92,7 @@ where DefaultAllocator: Allocator<(usize, usize), D> /// Applies this sequence of permutations to the rows of `rhs`. #[inline] - pub fn permute_rows(&self, rhs: &mut Matrix) + pub fn permute_rows(&self, rhs: &mut Matrix) where S2: StorageMut { for i in self.ipiv.rows_range(..self.len).iter() { rhs.swap_rows(i.0, i.1) @@ -101,7 +101,7 @@ where DefaultAllocator: Allocator<(usize, usize), D> /// Applies this sequence of permutations in reverse to the rows of `rhs`. #[inline] - pub fn inv_permute_rows( + pub fn inv_permute_rows( &self, rhs: &mut Matrix, ) where @@ -115,7 +115,7 @@ where DefaultAllocator: Allocator<(usize, usize), D> /// Applies this sequence of permutations to the columns of `rhs`. 
#[inline] - pub fn permute_columns( + pub fn permute_columns( &self, rhs: &mut Matrix, ) where @@ -128,7 +128,7 @@ where DefaultAllocator: Allocator<(usize, usize), D> /// Applies this sequence of permutations in reverse to the columns of `rhs`. #[inline] - pub fn inv_permute_columns( + pub fn inv_permute_columns( &self, rhs: &mut Matrix, ) where diff --git a/src/sparse/cs_matrix.rs b/src/sparse/cs_matrix.rs index e8c259a2..9cc6b51b 100644 --- a/src/sparse/cs_matrix.rs +++ b/src/sparse/cs_matrix.rs @@ -25,7 +25,7 @@ impl<'a, N> ColumnEntries<'a, N> { } } -impl<'a, N: Copy> Iterator for ColumnEntries<'a, N> { +impl<'a, N: Clone> Iterator for ColumnEntries<'a, N> { type Item = (usize, N); #[inline] @@ -33,8 +33,8 @@ impl<'a, N: Copy> Iterator for ColumnEntries<'a, N> { if self.curr >= self.i.len() { None } else { - let res = Some((unsafe { *self.i.get_unchecked(self.curr) }, unsafe { - *self.v.get_unchecked(self.curr) + let res = Some((unsafe { self.i.get_unchecked(self.curr).clone() }, unsafe { + self.v.get_unchecked(self.curr).clone() })); self.curr += 1; res @@ -105,7 +105,7 @@ pub trait CsStorageMut: /// A storage of column-compressed sparse matrix based on a Vec. #[derive(Clone, Debug, PartialEq)] -pub struct CsVecStorage +pub struct CsVecStorage where DefaultAllocator: Allocator { pub(crate) shape: (R, C), @@ -114,7 +114,7 @@ where DefaultAllocator: Allocator pub(crate) vals: Vec, } -impl CsVecStorage +impl CsVecStorage where DefaultAllocator: Allocator { /// The value buffer of this storage. 
@@ -133,9 +133,9 @@ where DefaultAllocator: Allocator } } -impl CsVecStorage where DefaultAllocator: Allocator {} +impl CsVecStorage where DefaultAllocator: Allocator {} -impl<'a, N: Scalar + Copy, R: Dim, C: Dim> CsStorageIter<'a, N, R, C> for CsVecStorage +impl<'a, N: Scalar + Clone, R: Dim, C: Dim> CsStorageIter<'a, N, R, C> for CsVecStorage where DefaultAllocator: Allocator { type ColumnEntries = ColumnEntries<'a, N>; @@ -154,7 +154,7 @@ where DefaultAllocator: Allocator } } -impl CsStorage for CsVecStorage +impl CsStorage for CsVecStorage where DefaultAllocator: Allocator { #[inline] @@ -199,7 +199,7 @@ where DefaultAllocator: Allocator } } -impl<'a, N: Scalar + Copy, R: Dim, C: Dim> CsStorageIterMut<'a, N, R, C> for CsVecStorage +impl<'a, N: Scalar + Clone, R: Dim, C: Dim> CsStorageIterMut<'a, N, R, C> for CsVecStorage where DefaultAllocator: Allocator { type ValuesMut = slice::IterMut<'a, N>; @@ -220,11 +220,11 @@ where DefaultAllocator: Allocator } } -impl CsStorageMut for CsVecStorage where DefaultAllocator: Allocator +impl CsStorageMut for CsVecStorage where DefaultAllocator: Allocator {} /* -pub struct CsSliceStorage<'a, N: Scalar + Copy, R: Dim, C: DimAdd> { +pub struct CsSliceStorage<'a, N: Scalar + Clone, R: Dim, C: DimAdd> { shape: (R, C), p: VectorSlice>, i: VectorSlice, @@ -234,7 +234,7 @@ pub struct CsSliceStorage<'a, N: Scalar + Copy, R: Dim, C: DimAdd> { /// A compressed sparse column matrix. #[derive(Clone, Debug, PartialEq)] pub struct CsMatrix< - N: Scalar + Copy, + N: Scalar + Clone, R: Dim = Dynamic, C: Dim = Dynamic, S: CsStorage = CsVecStorage, @@ -246,7 +246,7 @@ pub struct CsMatrix< /// A column compressed sparse vector. 
pub type CsVector> = CsMatrix; -impl CsMatrix +impl CsMatrix where DefaultAllocator: Allocator { /// Creates a new compressed sparse column matrix with the specified dimension and @@ -323,7 +323,7 @@ where DefaultAllocator: Allocator } /* -impl CsMatrix { +impl CsMatrix { pub(crate) fn from_parts( nrows: usize, ncols: usize, @@ -340,7 +340,7 @@ impl CsMatrix { } */ -impl> CsMatrix { +impl> CsMatrix { pub(crate) fn from_data(data: S) -> Self { CsMatrix { data, @@ -433,7 +433,7 @@ impl> CsMatrix> CsMatrix { +impl> CsMatrix { /// Iterator through all the mutable values of this sparse matrix. #[inline] pub fn values_mut(&mut self) -> impl Iterator { @@ -441,7 +441,7 @@ impl> CsMatrix CsMatrix +impl CsMatrix where DefaultAllocator: Allocator { pub(crate) fn sort(&mut self) @@ -470,7 +470,7 @@ where DefaultAllocator: Allocator // Permute the values too. for (i, irow) in range.clone().zip(self.data.i[range].iter().cloned()) { - self.data.vals[i] = workspace[irow]; + self.data.vals[i] = workspace[irow].inlined_clone(); } } } @@ -492,11 +492,11 @@ where DefaultAllocator: Allocator let curr_irow = self.data.i[idx]; if curr_irow == irow { - value += self.data.vals[idx]; + value += self.data.vals[idx].inlined_clone(); } else { self.data.i[curr_i] = irow; self.data.vals[curr_i] = value; - value = self.data.vals[idx]; + value = self.data.vals[idx].inlined_clone(); irow = curr_irow; curr_i += 1; } diff --git a/src/sparse/cs_matrix_conversion.rs b/src/sparse/cs_matrix_conversion.rs index 251fa282..0844f22c 100644 --- a/src/sparse/cs_matrix_conversion.rs +++ b/src/sparse/cs_matrix_conversion.rs @@ -7,7 +7,7 @@ use crate::sparse::{CsMatrix, CsStorage}; use crate::storage::Storage; use crate::{DefaultAllocator, Dim, Dynamic, Matrix, MatrixMN, Scalar}; -impl<'a, N: Scalar + Copy + Zero + ClosedAdd> CsMatrix { +impl<'a, N: Scalar + Clone + Zero + ClosedAdd> CsMatrix { /// Creates a column-compressed sparse matrix from a sparse matrix in triplet form. 
pub fn from_triplet( nrows: usize, @@ -21,7 +21,7 @@ impl<'a, N: Scalar + Copy + Zero + ClosedAdd> CsMatrix { } } -impl<'a, N: Scalar + Copy + Zero + ClosedAdd, R: Dim, C: Dim> CsMatrix +impl<'a, N: Scalar + Clone + Zero + ClosedAdd, R: Dim, C: Dim> CsMatrix where DefaultAllocator: Allocator + Allocator { /// Creates a column-compressed sparse matrix from a sparse matrix in triplet form. @@ -66,7 +66,7 @@ where DefaultAllocator: Allocator + Allocator } } -impl<'a, N: Scalar + Copy + Zero, R: Dim, C: Dim, S> From> for MatrixMN +impl<'a, N: Scalar + Clone + Zero, R: Dim, C: Dim, S> From> for MatrixMN where S: CsStorage, DefaultAllocator: Allocator, @@ -85,7 +85,7 @@ where } } -impl<'a, N: Scalar + Copy + Zero, R: Dim, C: Dim, S> From> for CsMatrix +impl<'a, N: Scalar + Clone + Zero, R: Dim, C: Dim, S> From> for CsMatrix where S: Storage, DefaultAllocator: Allocator + Allocator, @@ -103,7 +103,7 @@ where for i in 0..nrows.value() { if !column[i].is_zero() { res.data.i[nz] = i; - res.data.vals[nz] = column[i]; + res.data.vals[nz] = column[i].inlined_clone(); nz += 1; } } diff --git a/src/sparse/cs_matrix_ops.rs b/src/sparse/cs_matrix_ops.rs index 9e827e3c..e602fec6 100644 --- a/src/sparse/cs_matrix_ops.rs +++ b/src/sparse/cs_matrix_ops.rs @@ -8,7 +8,7 @@ use crate::sparse::{CsMatrix, CsStorage, CsStorageMut, CsVector}; use crate::storage::StorageMut; use crate::{DefaultAllocator, Dim, Scalar, Vector, VectorN, U1}; -impl> CsMatrix { +impl> CsMatrix { fn scatter( &self, j: usize, @@ -28,9 +28,9 @@ impl> CsMatrix> CsMatrix CsVector { +impl CsVector { pub fn axpy(&mut self, alpha: N, x: CsVector, beta: N) { // First, compute the number of non-zero entries. let mut nnzero = 0; @@ -76,7 +76,7 @@ impl CsVector { } */ -impl> Vector { +impl> Vector { /// Perform a sparse axpy operation: `self = alpha * x + beta * self` operation. 
pub fn axpy_cs(&mut self, alpha: N, x: &CsVector, beta: N) where @@ -88,18 +88,18 @@ impl Mul<&'b CsMatrix> for &'a CsMatrix where - N: Scalar + Copy + ClosedAdd + ClosedMul + Zero, + N: Scalar + Clone + ClosedAdd + ClosedMul + Zero, R1: Dim, C1: Dim, R2: Dim, @@ -159,14 +159,14 @@ where for (i, beta) in rhs.data.column_entries(j) { for (k, val) in self.data.column_entries(i) { - workspace[k] += val * beta; + workspace[k] += val.inlined_clone() * beta.inlined_clone(); } } for (i, val) in workspace.as_mut_slice().iter_mut().enumerate() { if !val.is_zero() { res.data.i[nz] = i; - res.data.vals[nz] = *val; + res.data.vals[nz] = val.inlined_clone(); *val = N::zero(); nz += 1; } @@ -219,7 +219,7 @@ where impl<'a, 'b, N, R1, R2, C1, C2, S1, S2> Add<&'b CsMatrix> for &'a CsMatrix where - N: Scalar + Copy + ClosedAdd + ClosedMul + One, + N: Scalar + Clone + ClosedAdd + ClosedMul + One, R1: Dim, C1: Dim, R2: Dim, @@ -273,7 +273,7 @@ where res.data.i[range.clone()].sort(); for p in range { - res.data.vals[p] = workspace[res.data.i[p]] + res.data.vals[p] = workspace[res.data.i[p]].inlined_clone() } } @@ -287,7 +287,7 @@ where impl<'a, 'b, N, R, C, S> Mul for CsMatrix where - N: Scalar + Copy + ClosedAdd + ClosedMul + Zero, + N: Scalar + Clone + ClosedAdd + ClosedMul + Zero, R: Dim, C: Dim, S: CsStorageMut, @@ -296,7 +296,7 @@ where fn mul(mut self, rhs: N) -> Self::Output { for e in self.values_mut() { - *e *= rhs + *e *= rhs.inlined_clone() } self From 6c236af6962a1c85b13ac59c131a8feea1bede41 Mon Sep 17 00:00:00 2001 From: Avi Weinstock Date: Thu, 21 Nov 2019 16:43:58 -0500 Subject: [PATCH 41/67] Add Scalar + Copy bounds to code that's under feature flags. `./ci/test.sh` now passes locally. 
Refactoring done via the following sed commands: ```bash export RELEVANT_SOURCEFILES="$(find src -name '*.rs') $(find examples -name '*.rs')" for f in $RELEVANT_SOURCEFILES; do sed -i 's/N\([0-9]\?\): *Scalar + \(Arbitrary\)/N\1: Scalar + Copy + \2/' $f; done for f in $RELEVANT_SOURCEFILES; do sed -i 's/N\([0-9]\?\): *Scalar + \(Serialize\)/N\1: Scalar + Copy + \2/' $f; done for f in $RELEVANT_SOURCEFILES; do sed -i 's/N\([0-9]\?\): *Scalar + \(Deserialize\)/N\1: Scalar + Copy + \2/' $f; do export RELEVANT_SOURCEFILES="$(find nalgebra-glm -name '*.rs')" for f in $RELEVANT_SOURCEFILES; do sed -i 's/N\([0-9]\?\): *Scalar,/N\1: Scalar + Copy,/' $f; done for f in $RELEVANT_SOURCEFILES; do sed -i 's/N\([0-9]\?\): *Scalar>/N\1: Scalar + Copy>/' $f; done for f in algebra-glm/src/traits.rs; do sed -i 's/Scalar + Ring/Scalar + Copy + Ring>/' $f; done # Number trait definition ``` --- nalgebra-glm/src/common.rs | 6 +-- nalgebra-glm/src/constructors.rs | 32 ++++++------- nalgebra-glm/src/gtc/bitfield.rs | 10 ++-- nalgebra-glm/src/gtc/integer.rs | 4 +- nalgebra-glm/src/gtc/matrix_access.rs | 8 ++-- nalgebra-glm/src/gtc/packing.rs | 6 +-- nalgebra-glm/src/gtc/round.rs | 18 +++---- nalgebra-glm/src/gtc/type_ptr.rs | 60 ++++++++++++------------ nalgebra-glm/src/gtc/ulp.rs | 2 +- nalgebra-glm/src/integer.rs | 20 ++++---- nalgebra-glm/src/matrix.rs | 2 +- nalgebra-glm/src/packing.rs | 24 +++++----- nalgebra-glm/src/traits.rs | 8 ++-- src/base/array_storage.rs | 6 +-- src/base/construction.rs | 2 +- src/geometry/point_construction.rs | 2 +- src/geometry/translation_construction.rs | 2 +- 17 files changed, 106 insertions(+), 106 deletions(-) diff --git a/nalgebra-glm/src/common.rs b/nalgebra-glm/src/common.rs index eda9f295..5cebbc3d 100644 --- a/nalgebra-glm/src/common.rs +++ b/nalgebra-glm/src/common.rs @@ -297,13 +297,13 @@ where DefaultAllocator: Alloc { v.map(int_bits_to_float) } -//pub fn isinf(x: &TVec) -> TVec +//pub fn isinf(x: &TVec) -> TVec // where DefaultAllocator: Alloc 
{ // unimplemented!() // //} // -//pub fn isnan(x: &TVec) -> TVec +//pub fn isnan(x: &TVec) -> TVec // where DefaultAllocator: Alloc { // unimplemented!() // @@ -504,7 +504,7 @@ where DefaultAllocator: Alloc { x.map(|x| x.round()) } -//pub fn roundEven(x: &TVec) -> TVec +//pub fn roundEven(x: &TVec) -> TVec // where DefaultAllocator: Alloc { // unimplemented!() //} diff --git a/nalgebra-glm/src/constructors.rs b/nalgebra-glm/src/constructors.rs index 949ea9e4..93d06e45 100644 --- a/nalgebra-glm/src/constructors.rs +++ b/nalgebra-glm/src/constructors.rs @@ -15,28 +15,28 @@ use crate::aliases::{TMat, Qua, TVec1, TVec2, TVec3, TVec4, TMat2, TMat2x3, TMat /// # use nalgebra_glm as glm; /// let v = glm::vec1(true); /// ``` -pub fn vec1(x: N) -> TVec1 { +pub fn vec1(x: N) -> TVec1 { TVec1::new(x) } /// Creates a new 2D vector. -pub fn vec2(x: N, y: N) -> TVec2 { +pub fn vec2(x: N, y: N) -> TVec2 { TVec2::new(x, y) } /// Creates a new 3D vector. -pub fn vec3(x: N, y: N, z: N) -> TVec3 { +pub fn vec3(x: N, y: N, z: N) -> TVec3 { TVec3::new(x, y, z) } /// Creates a new 4D vector. -pub fn vec4(x: N, y: N, z: N, w: N) -> TVec4 { +pub fn vec4(x: N, y: N, z: N, w: N) -> TVec4 { TVec4::new(x, y, z, w) } /// Create a new 2x2 matrix. -pub fn mat2(m11: N, m12: N, +pub fn mat2(m11: N, m12: N, m21: N, m22: N) -> TMat2 { TMat::::new( m11, m12, @@ -45,7 +45,7 @@ pub fn mat2(m11: N, m12: N, } /// Create a new 2x2 matrix. -pub fn mat2x2(m11: N, m12: N, +pub fn mat2x2(m11: N, m12: N, m21: N, m22: N) -> TMat2 { TMat::::new( m11, m12, @@ -54,7 +54,7 @@ pub fn mat2x2(m11: N, m12: N, } /// Create a new 2x3 matrix. -pub fn mat2x3(m11: N, m12: N, m13: N, +pub fn mat2x3(m11: N, m12: N, m13: N, m21: N, m22: N, m23: N) -> TMat2x3 { TMat::::new( m11, m12, m13, @@ -63,7 +63,7 @@ pub fn mat2x3(m11: N, m12: N, m13: N, } /// Create a new 2x4 matrix. 
-pub fn mat2x4(m11: N, m12: N, m13: N, m14: N, +pub fn mat2x4(m11: N, m12: N, m13: N, m14: N, m21: N, m22: N, m23: N, m24: N) -> TMat2x4 { TMat::::new( m11, m12, m13, m14, @@ -72,7 +72,7 @@ pub fn mat2x4(m11: N, m12: N, m13: N, m14: N, } /// Create a new 3x3 matrix. -pub fn mat3(m11: N, m12: N, m13: N, +pub fn mat3(m11: N, m12: N, m13: N, m21: N, m22: N, m23: N, m31: N, m32: N, m33: N) -> TMat3 { TMat::::new( @@ -83,7 +83,7 @@ pub fn mat3(m11: N, m12: N, m13: N, } /// Create a new 3x2 matrix. -pub fn mat3x2(m11: N, m12: N, +pub fn mat3x2(m11: N, m12: N, m21: N, m22: N, m31: N, m32: N) -> TMat3x2 { TMat::::new( @@ -94,7 +94,7 @@ pub fn mat3x2(m11: N, m12: N, } /// Create a new 3x3 matrix. -pub fn mat3x3(m11: N, m12: N, m13: N, +pub fn mat3x3(m11: N, m12: N, m13: N, m21: N, m22: N, m23: N, m31: N, m32: N, m33: N) -> TMat3 { TMat::::new( @@ -105,7 +105,7 @@ pub fn mat3x3(m11: N, m12: N, m13: N, } /// Create a new 3x4 matrix. -pub fn mat3x4(m11: N, m12: N, m13: N, m14: N, +pub fn mat3x4(m11: N, m12: N, m13: N, m14: N, m21: N, m22: N, m23: N, m24: N, m31: N, m32: N, m33: N, m34: N) -> TMat3x4 { TMat::::new( @@ -116,7 +116,7 @@ pub fn mat3x4(m11: N, m12: N, m13: N, m14: N, } /// Create a new 4x2 matrix. -pub fn mat4x2(m11: N, m12: N, +pub fn mat4x2(m11: N, m12: N, m21: N, m22: N, m31: N, m32: N, m41: N, m42: N) -> TMat4x2 { @@ -129,7 +129,7 @@ pub fn mat4x2(m11: N, m12: N, } /// Create a new 4x3 matrix. -pub fn mat4x3(m11: N, m12: N, m13: N, +pub fn mat4x3(m11: N, m12: N, m13: N, m21: N, m22: N, m23: N, m31: N, m32: N, m33: N, m41: N, m42: N, m43: N) -> TMat4x3 { @@ -142,7 +142,7 @@ pub fn mat4x3(m11: N, m12: N, m13: N, } /// Create a new 4x4 matrix. -pub fn mat4x4(m11: N, m12: N, m13: N, m14: N, +pub fn mat4x4(m11: N, m12: N, m13: N, m14: N, m21: N, m22: N, m23: N, m24: N, m31: N, m32: N, m33: N, m34: N, m41: N, m42: N, m43: N, m44: N) -> TMat4 { @@ -155,7 +155,7 @@ pub fn mat4x4(m11: N, m12: N, m13: N, m14: N, } /// Create a new 4x4 matrix. 
-pub fn mat4(m11: N, m12: N, m13: N, m14: N, +pub fn mat4(m11: N, m12: N, m13: N, m14: N, m21: N, m22: N, m23: N, m24: N, m31: N, m32: N, m33: N, m34: N, m41: N, m42: N, m43: N, m44: N) -> TMat4 { diff --git a/nalgebra-glm/src/gtc/bitfield.rs b/nalgebra-glm/src/gtc/bitfield.rs index bdf18552..5e4f7063 100644 --- a/nalgebra-glm/src/gtc/bitfield.rs +++ b/nalgebra-glm/src/gtc/bitfield.rs @@ -19,7 +19,7 @@ pub fn bitfieldFillOne(Value: IU, FirstBit: i32, BitCount: i32) -> IU { unimplemented!() } -pub fn bitfieldFillOne2(Value: &TVec, FirstBit: i32, BitCount: i32) -> TVec +pub fn bitfieldFillOne2(Value: &TVec, FirstBit: i32, BitCount: i32) -> TVec where DefaultAllocator: Alloc { unimplemented!() } @@ -28,7 +28,7 @@ pub fn bitfieldFillZero(Value: IU, FirstBit: i32, BitCount: i32) -> IU { unimplemented!() } -pub fn bitfieldFillZero2(Value: &TVec, FirstBit: i32, BitCount: i32) -> TVec +pub fn bitfieldFillZero2(Value: &TVec, FirstBit: i32, BitCount: i32) -> TVec where DefaultAllocator: Alloc { unimplemented!() } @@ -113,7 +113,7 @@ pub fn bitfieldRotateLeft(In: IU, Shift: i32) -> IU { unimplemented!() } -pub fn bitfieldRotateLeft2(In: &TVec, Shift: i32) -> TVec +pub fn bitfieldRotateLeft2(In: &TVec, Shift: i32) -> TVec where DefaultAllocator: Alloc { unimplemented!() } @@ -122,7 +122,7 @@ pub fn bitfieldRotateRight(In: IU, Shift: i32) -> IU { unimplemented!() } -pub fn bitfieldRotateRight2(In: &TVec, Shift: i32) -> TVec +pub fn bitfieldRotateRight2(In: &TVec, Shift: i32) -> TVec where DefaultAllocator: Alloc { unimplemented!() } @@ -131,7 +131,7 @@ pub fn mask(Bits: IU) -> IU { unimplemented!() } -pub fn mask2(v: &TVec) -> TVec +pub fn mask2(v: &TVec) -> TVec where DefaultAllocator: Alloc { unimplemented!() } diff --git a/nalgebra-glm/src/gtc/integer.rs b/nalgebra-glm/src/gtc/integer.rs index a972a4ab..146b874c 100644 --- a/nalgebra-glm/src/gtc/integer.rs +++ b/nalgebra-glm/src/gtc/integer.rs @@ -3,7 +3,7 @@ //use crate::traits::{Alloc, Dimension}; //use 
crate::aliases::TVec; -//pub fn iround(x: &TVec) -> TVec +//pub fn iround(x: &TVec) -> TVec // where DefaultAllocator: Alloc { // x.map(|x| x.round()) //} @@ -12,7 +12,7 @@ // unimplemented!() //} // -//pub fn uround(x: &TVec) -> TVec +//pub fn uround(x: &TVec) -> TVec // where DefaultAllocator: Alloc { // unimplemented!() //} diff --git a/nalgebra-glm/src/gtc/matrix_access.rs b/nalgebra-glm/src/gtc/matrix_access.rs index f61d9782..2f9bb5fa 100644 --- a/nalgebra-glm/src/gtc/matrix_access.rs +++ b/nalgebra-glm/src/gtc/matrix_access.rs @@ -10,7 +10,7 @@ use crate::traits::{Alloc, Dimension}; /// * [`row`](fn.row.html) /// * [`set_column`](fn.set_column.html) /// * [`set_row`](fn.set_row.html) -pub fn column( +pub fn column( m: &TMat, index: usize, ) -> TVec @@ -27,7 +27,7 @@ where /// * [`column`](fn.column.html) /// * [`row`](fn.row.html) /// * [`set_row`](fn.set_row.html) -pub fn set_column( +pub fn set_column( m: &TMat, index: usize, x: &TVec, @@ -47,7 +47,7 @@ where /// * [`column`](fn.column.html) /// * [`set_column`](fn.set_column.html) /// * [`set_row`](fn.set_row.html) -pub fn row(m: &TMat, index: usize) -> TVec +pub fn row(m: &TMat, index: usize) -> TVec where DefaultAllocator: Alloc { m.row(index).into_owned().transpose() } @@ -59,7 +59,7 @@ where DefaultAllocator: Alloc { /// * [`column`](fn.column.html) /// * [`row`](fn.row.html) /// * [`set_column`](fn.set_column.html) -pub fn set_row( +pub fn set_row( m: &TMat, index: usize, x: &TVec, diff --git a/nalgebra-glm/src/gtc/packing.rs b/nalgebra-glm/src/gtc/packing.rs index ea5acac4..1306eca3 100644 --- a/nalgebra-glm/src/gtc/packing.rs +++ b/nalgebra-glm/src/gtc/packing.rs @@ -49,7 +49,7 @@ pub fn packInt4x8(v: &I8Vec4) -> i32 { unimplemented!() } -pub fn packRGBM(rgb: &TVec3) -> TVec4 { +pub fn packRGBM(rgb: &TVec3) -> TVec4 { unimplemented!() } @@ -155,7 +155,7 @@ pub fn unpackF3x9_E1x5(p: i32) -> Vec3 { unimplemented!() } -pub fn unpackHalf(p: TVec) -> TVec +pub fn unpackHalf(p: TVec) -> TVec where 
DefaultAllocator: Alloc { unimplemented!() } @@ -192,7 +192,7 @@ pub fn unpackInt4x8(p: i32) -> I8Vec4 { unimplemented!() } -pub fn unpackRGBM(rgbm: &TVec4) -> TVec3 { +pub fn unpackRGBM(rgbm: &TVec4) -> TVec3 { unimplemented!() } diff --git a/nalgebra-glm/src/gtc/round.rs b/nalgebra-glm/src/gtc/round.rs index 5ad95780..d1ca295b 100644 --- a/nalgebra-glm/src/gtc/round.rs +++ b/nalgebra-glm/src/gtc/round.rs @@ -8,7 +8,7 @@ pub fn ceilMultiple(v: T, Multiple: T) -> T { unimplemented!() } -pub fn ceilMultiple2(v: &TVec, Multiple: &TVec) -> TVec +pub fn ceilMultiple2(v: &TVec, Multiple: &TVec) -> TVec where DefaultAllocator: Alloc { unimplemented!() } @@ -17,7 +17,7 @@ pub fn ceilPowerOfTwo(v: IU) -> IU { unimplemented!() } -pub fn ceilPowerOfTwo2(v: &TVec) -> TVec +pub fn ceilPowerOfTwo2(v: &TVec) -> TVec where DefaultAllocator: Alloc { unimplemented!() } @@ -26,7 +26,7 @@ pub fn floorMultiple(v: T, Multiple: T) -> T { unimplemented!() } -pub fn floorMultiple2(v: &TVec, Multiple: &TVec) -> TVec +pub fn floorMultiple2(v: &TVec, Multiple: &TVec) -> TVec where DefaultAllocator: Alloc { unimplemented!() } @@ -35,7 +35,7 @@ pub fn floorPowerOfTwo(v: IU) -> IU { unimplemented!() } -pub fn floorPowerOfTwo2(v: &TVec) -> TVec +pub fn floorPowerOfTwo2(v: &TVec) -> TVec where DefaultAllocator: Alloc { unimplemented!() } @@ -44,12 +44,12 @@ pub fn isMultiple(v: IU, Multiple: IU) -> bool { unimplemented!() } -pub fn isMultiple2(v: &TVec,Multiple: N) -> TVec +pub fn isMultiple2(v: &TVec,Multiple: N) -> TVec where DefaultAllocator: Alloc { unimplemented!() } -pub fn isMultiple3(v: &TVec, Multiple: &TVec) -> TVec +pub fn isMultiple3(v: &TVec, Multiple: &TVec) -> TVec where DefaultAllocator: Alloc { unimplemented!() } @@ -58,7 +58,7 @@ pub fn isPowerOfTwo2(v: IU) -> bool { unimplemented!() } -pub fn isPowerOfTwo(v: &TVec) -> TVec +pub fn isPowerOfTwo(v: &TVec) -> TVec where DefaultAllocator: Alloc { unimplemented!() } @@ -67,7 +67,7 @@ pub fn roundMultiple(v: T, Multiple: T) -> T { 
unimplemented!() } -pub fn roundMultiple2(v: &TVec, Multiple: &TVec) -> TVec +pub fn roundMultiple2(v: &TVec, Multiple: &TVec) -> TVec where DefaultAllocator: Alloc { unimplemented!() } @@ -76,7 +76,7 @@ pub fn roundPowerOfTwo(v: IU) -> IU { unimplemented!() } -pub fn roundPowerOfTwo2(v: &TVec) -> TVec +pub fn roundPowerOfTwo2(v: &TVec) -> TVec where DefaultAllocator: Alloc { unimplemented!() } diff --git a/nalgebra-glm/src/gtc/type_ptr.rs b/nalgebra-glm/src/gtc/type_ptr.rs index 4029bf01..92309e93 100644 --- a/nalgebra-glm/src/gtc/type_ptr.rs +++ b/nalgebra-glm/src/gtc/type_ptr.rs @@ -7,62 +7,62 @@ use crate::aliases::{ use crate::traits::{Alloc, Dimension, Number}; /// Creates a 2x2 matrix from a slice arranged in column-major order. -pub fn make_mat2(ptr: &[N]) -> TMat2 { +pub fn make_mat2(ptr: &[N]) -> TMat2 { TMat2::from_column_slice(ptr) } /// Creates a 2x2 matrix from a slice arranged in column-major order. -pub fn make_mat2x2(ptr: &[N]) -> TMat2 { +pub fn make_mat2x2(ptr: &[N]) -> TMat2 { TMat2::from_column_slice(ptr) } /// Creates a 2x3 matrix from a slice arranged in column-major order. -pub fn make_mat2x3(ptr: &[N]) -> TMat2x3 { +pub fn make_mat2x3(ptr: &[N]) -> TMat2x3 { TMat2x3::from_column_slice(ptr) } /// Creates a 2x4 matrix from a slice arranged in column-major order. -pub fn make_mat2x4(ptr: &[N]) -> TMat2x4 { +pub fn make_mat2x4(ptr: &[N]) -> TMat2x4 { TMat2x4::from_column_slice(ptr) } /// Creates a 3 matrix from a slice arranged in column-major order. -pub fn make_mat3(ptr: &[N]) -> TMat3 { +pub fn make_mat3(ptr: &[N]) -> TMat3 { TMat3::from_column_slice(ptr) } /// Creates a 3x2 matrix from a slice arranged in column-major order. -pub fn make_mat3x2(ptr: &[N]) -> TMat3x2 { +pub fn make_mat3x2(ptr: &[N]) -> TMat3x2 { TMat3x2::from_column_slice(ptr) } /// Creates a 3x3 matrix from a slice arranged in column-major order. 
-pub fn make_mat3x3(ptr: &[N]) -> TMat3 { +pub fn make_mat3x3(ptr: &[N]) -> TMat3 { TMat3::from_column_slice(ptr) } /// Creates a 3x4 matrix from a slice arranged in column-major order. -pub fn make_mat3x4(ptr: &[N]) -> TMat3x4 { +pub fn make_mat3x4(ptr: &[N]) -> TMat3x4 { TMat3x4::from_column_slice(ptr) } /// Creates a 4x4 matrix from a slice arranged in column-major order. -pub fn make_mat4(ptr: &[N]) -> TMat4 { +pub fn make_mat4(ptr: &[N]) -> TMat4 { TMat4::from_column_slice(ptr) } /// Creates a 4x2 matrix from a slice arranged in column-major order. -pub fn make_mat4x2(ptr: &[N]) -> TMat4x2 { +pub fn make_mat4x2(ptr: &[N]) -> TMat4x2 { TMat4x2::from_column_slice(ptr) } /// Creates a 4x3 matrix from a slice arranged in column-major order. -pub fn make_mat4x3(ptr: &[N]) -> TMat4x3 { +pub fn make_mat4x3(ptr: &[N]) -> TMat4x3 { TMat4x3::from_column_slice(ptr) } /// Creates a 4x4 matrix from a slice arranged in column-major order. -pub fn make_mat4x4(ptr: &[N]) -> TMat4 { +pub fn make_mat4x4(ptr: &[N]) -> TMat4 { TMat4::from_column_slice(ptr) } @@ -75,7 +75,7 @@ pub fn mat2_to_mat3(m: &TMat2) -> TMat3 { } /// Converts a 3x3 matrix to a 2x2 matrix. -pub fn mat3_to_mat2(m: &TMat3) -> TMat2 { +pub fn mat3_to_mat2(m: &TMat3) -> TMat2 { TMat2::new(m.m11, m.m12, m.m21, m.m22) } @@ -90,7 +90,7 @@ pub fn mat3_to_mat4(m: &TMat3) -> TMat4 { } /// Converts a 4x4 matrix to a 3x3 matrix. -pub fn mat4_to_mat3(m: &TMat4) -> TMat3 { +pub fn mat4_to_mat3(m: &TMat4) -> TMat3 { TMat3::new( m.m11, m.m12, m.m13, m.m21, m.m22, m.m23, m.m31, m.m32, m.m33, ) @@ -107,7 +107,7 @@ pub fn mat2_to_mat4(m: &TMat2) -> TMat4 { } /// Converts a 4x4 matrix to a 2x2 matrix. 
-pub fn mat4_to_mat2(m: &TMat4) -> TMat2 { +pub fn mat4_to_mat2(m: &TMat4) -> TMat2 { TMat2::new(m.m11, m.m12, m.m21, m.m22) } @@ -123,7 +123,7 @@ pub fn make_quat(ptr: &[N]) -> Qua { /// * [`make_vec2`](fn.make_vec2.html) /// * [`make_vec3`](fn.make_vec3.html) /// * [`make_vec4`](fn.make_vec4.html) -pub fn make_vec1(v: &TVec1) -> TVec1 { +pub fn make_vec1(v: &TVec1) -> TVec1 { *v } @@ -137,7 +137,7 @@ pub fn make_vec1(v: &TVec1) -> TVec1 { /// * [`vec1_to_vec2`](fn.vec1_to_vec2.html) /// * [`vec1_to_vec3`](fn.vec1_to_vec3.html) /// * [`vec1_to_vec4`](fn.vec1_to_vec4.html) -pub fn vec2_to_vec1(v: &TVec2) -> TVec1 { +pub fn vec2_to_vec1(v: &TVec2) -> TVec1 { TVec1::new(v.x) } @@ -151,7 +151,7 @@ pub fn vec2_to_vec1(v: &TVec2) -> TVec1 { /// * [`vec1_to_vec2`](fn.vec1_to_vec2.html) /// * [`vec1_to_vec3`](fn.vec1_to_vec3.html) /// * [`vec1_to_vec4`](fn.vec1_to_vec4.html) -pub fn vec3_to_vec1(v: &TVec3) -> TVec1 { +pub fn vec3_to_vec1(v: &TVec3) -> TVec1 { TVec1::new(v.x) } @@ -165,7 +165,7 @@ pub fn vec3_to_vec1(v: &TVec3) -> TVec1 { /// * [`vec1_to_vec2`](fn.vec1_to_vec2.html) /// * [`vec1_to_vec3`](fn.vec1_to_vec3.html) /// * [`vec1_to_vec4`](fn.vec1_to_vec4.html) -pub fn vec4_to_vec1(v: &TVec4) -> TVec1 { +pub fn vec4_to_vec1(v: &TVec4) -> TVec1 { TVec1::new(v.x) } @@ -196,7 +196,7 @@ pub fn vec1_to_vec2(v: &TVec1) -> TVec2 { /// * [`vec2_to_vec2`](fn.vec2_to_vec2.html) /// * [`vec2_to_vec3`](fn.vec2_to_vec3.html) /// * [`vec2_to_vec4`](fn.vec2_to_vec4.html) -pub fn vec2_to_vec2(v: &TVec2) -> TVec2 { +pub fn vec2_to_vec2(v: &TVec2) -> TVec2 { *v } @@ -210,7 +210,7 @@ pub fn vec2_to_vec2(v: &TVec2) -> TVec2 { /// * [`vec2_to_vec2`](fn.vec2_to_vec2.html) /// * [`vec2_to_vec3`](fn.vec2_to_vec3.html) /// * [`vec2_to_vec4`](fn.vec2_to_vec4.html) -pub fn vec3_to_vec2(v: &TVec3) -> TVec2 { +pub fn vec3_to_vec2(v: &TVec3) -> TVec2 { TVec2::new(v.x, v.y) } @@ -224,7 +224,7 @@ pub fn vec3_to_vec2(v: &TVec3) -> TVec2 { /// * [`vec2_to_vec2`](fn.vec2_to_vec2.html) /// * 
[`vec2_to_vec3`](fn.vec2_to_vec3.html) /// * [`vec2_to_vec4`](fn.vec2_to_vec4.html) -pub fn vec4_to_vec2(v: &TVec4) -> TVec2 { +pub fn vec4_to_vec2(v: &TVec4) -> TVec2 { TVec2::new(v.x, v.y) } @@ -235,7 +235,7 @@ pub fn vec4_to_vec2(v: &TVec4) -> TVec2 { /// * [`make_vec1`](fn.make_vec1.html) /// * [`make_vec3`](fn.make_vec3.html) /// * [`make_vec4`](fn.make_vec4.html) -pub fn make_vec2(ptr: &[N]) -> TVec2 { +pub fn make_vec2(ptr: &[N]) -> TVec2 { TVec2::from_column_slice(ptr) } @@ -280,7 +280,7 @@ pub fn vec2_to_vec3(v: &TVec2) -> TVec3 { /// * [`vec3_to_vec1`](fn.vec3_to_vec1.html) /// * [`vec3_to_vec2`](fn.vec3_to_vec2.html) /// * [`vec3_to_vec4`](fn.vec3_to_vec4.html) -pub fn vec3_to_vec3(v: &TVec3) -> TVec3 { +pub fn vec3_to_vec3(v: &TVec3) -> TVec3 { *v } @@ -294,7 +294,7 @@ pub fn vec3_to_vec3(v: &TVec3) -> TVec3 { /// * [`vec3_to_vec1`](fn.vec3_to_vec1.html) /// * [`vec3_to_vec2`](fn.vec3_to_vec2.html) /// * [`vec3_to_vec4`](fn.vec3_to_vec4.html) -pub fn vec4_to_vec3(v: &TVec4) -> TVec3 { +pub fn vec4_to_vec3(v: &TVec4) -> TVec3 { TVec3::new(v.x, v.y, v.z) } @@ -305,7 +305,7 @@ pub fn vec4_to_vec3(v: &TVec4) -> TVec3 { /// * [`make_vec1`](fn.make_vec1.html) /// * [`make_vec2`](fn.make_vec2.html) /// * [`make_vec4`](fn.make_vec4.html) -pub fn make_vec3(ptr: &[N]) -> TVec3 { +pub fn make_vec3(ptr: &[N]) -> TVec3 { TVec3::from_column_slice(ptr) } @@ -367,7 +367,7 @@ pub fn vec3_to_vec4(v: &TVec3) -> TVec4 { /// * [`vec4_to_vec1`](fn.vec4_to_vec1.html) /// * [`vec4_to_vec2`](fn.vec4_to_vec2.html) /// * [`vec4_to_vec3`](fn.vec4_to_vec3.html) -pub fn vec4_to_vec4(v: &TVec4) -> TVec4 { +pub fn vec4_to_vec4(v: &TVec4) -> TVec4 { *v } @@ -378,18 +378,18 @@ pub fn vec4_to_vec4(v: &TVec4) -> TVec4 { /// * [`make_vec1`](fn.make_vec1.html) /// * [`make_vec2`](fn.make_vec2.html) /// * [`make_vec3`](fn.make_vec3.html) -pub fn make_vec4(ptr: &[N]) -> TVec4 { +pub fn make_vec4(ptr: &[N]) -> TVec4 { TVec4::from_column_slice(ptr) } /// Converts a matrix or vector to a slice 
arranged in column-major order. -pub fn value_ptr(x: &TMat) -> &[N] +pub fn value_ptr(x: &TMat) -> &[N] where DefaultAllocator: Alloc { x.as_slice() } /// Converts a matrix or vector to a mutable slice arranged in column-major order. -pub fn value_ptr_mut(x: &mut TMat) -> &mut [N] +pub fn value_ptr_mut(x: &mut TMat) -> &mut [N] where DefaultAllocator: Alloc { x.as_mut_slice() } diff --git a/nalgebra-glm/src/gtc/ulp.rs b/nalgebra-glm/src/gtc/ulp.rs index 8258d0df..42ef2d05 100644 --- a/nalgebra-glm/src/gtc/ulp.rs +++ b/nalgebra-glm/src/gtc/ulp.rs @@ -7,7 +7,7 @@ pub fn float_distance(x: T, y: T) -> u64 { unimplemented!() } -pub fn float_distance2(x: &TVec2, y: &TVec2) -> TVec { +pub fn float_distance2(x: &TVec2, y: &TVec2) -> TVec { unimplemented!() } diff --git a/nalgebra-glm/src/integer.rs b/nalgebra-glm/src/integer.rs index 198d737a..3a2641e0 100644 --- a/nalgebra-glm/src/integer.rs +++ b/nalgebra-glm/src/integer.rs @@ -7,22 +7,22 @@ pub fn bitCount(v: T) -> i32 { unimplemented!() } -pub fn bitCount2(v: &TVec) -> TVec +pub fn bitCount2(v: &TVec) -> TVec where DefaultAllocator: Alloc { unimplemented!() } -pub fn bitfieldExtract(Value: &TVec, Offset: i32, Bits: i32) -> TVec +pub fn bitfieldExtract(Value: &TVec, Offset: i32, Bits: i32) -> TVec where DefaultAllocator: Alloc { unimplemented!() } -pub fn bitfieldInsert(Base: &TVec, Insert: &TVec, Offset: i32, Bits: i32) -> TVec +pub fn bitfieldInsert(Base: &TVec, Insert: &TVec, Offset: i32, Bits: i32) -> TVec where DefaultAllocator: Alloc { unimplemented!() } -pub fn bitfieldReverse(v: &TVec) -> TVec +pub fn bitfieldReverse(v: &TVec) -> TVec where DefaultAllocator: Alloc { unimplemented!() } @@ -31,7 +31,7 @@ pub fn findLSB(x: IU) -> u32 { unimplemented!() } -pub fn findLSB2(v: &TVec) -> TVec +pub fn findLSB2(v: &TVec) -> TVec where DefaultAllocator: Alloc { unimplemented!() } @@ -40,27 +40,27 @@ pub fn findMSB(x: IU) -> i32 { unimplemented!() } -pub fn findMSB2(v: &TVec) -> TVec +pub fn findMSB2(v: &TVec) -> TVec 
where DefaultAllocator: Alloc { unimplemented!() } -pub fn imulExtended(x: &TVec, y: &TVec, msb: &TVec, lsb: &TVec) +pub fn imulExtended(x: &TVec, y: &TVec, msb: &TVec, lsb: &TVec) where DefaultAllocator: Alloc { unimplemented!() } -pub fn uaddCarry(x: &TVec, y: &TVec, carry: &TVec) -> TVec +pub fn uaddCarry(x: &TVec, y: &TVec, carry: &TVec) -> TVec where DefaultAllocator: Alloc { unimplemented!() } -pub fn umulExtended(x: &TVec, y: &TVec, msb: &TVec, lsb: &TVec) +pub fn umulExtended(x: &TVec, y: &TVec, msb: &TVec, lsb: &TVec) where DefaultAllocator: Alloc { unimplemented!() } -pub fn usubBorrow(x: &TVec, y: &TVec, borrow: &TVec) -> TVec +pub fn usubBorrow(x: &TVec, y: &TVec, borrow: &TVec) -> TVec where DefaultAllocator: Alloc { unimplemented!() } diff --git a/nalgebra-glm/src/matrix.rs b/nalgebra-glm/src/matrix.rs index a4c4efe9..e83687dc 100644 --- a/nalgebra-glm/src/matrix.rs +++ b/nalgebra-glm/src/matrix.rs @@ -40,7 +40,7 @@ where } /// The transpose of the matrix `m`. -pub fn transpose(x: &TMat) -> TMat +pub fn transpose(x: &TMat) -> TMat where DefaultAllocator: Alloc { x.transpose() } diff --git a/nalgebra-glm/src/packing.rs b/nalgebra-glm/src/packing.rs index 3273fb26..641ad159 100644 --- a/nalgebra-glm/src/packing.rs +++ b/nalgebra-glm/src/packing.rs @@ -3,50 +3,50 @@ use na::Scalar; use crate::aliases::{Vec2, Vec4, UVec2}; -pub fn packDouble2x32(v: &UVec2) -> f64 { +pub fn packDouble2x32(v: &UVec2) -> f64 { unimplemented!() } -pub fn packHalf2x16(v: &Vec2) -> u32 { +pub fn packHalf2x16(v: &Vec2) -> u32 { unimplemented!() } -pub fn packSnorm2x16(v: &Vec2) -> u32 { +pub fn packSnorm2x16(v: &Vec2) -> u32 { unimplemented!() } -pub fn packSnorm4x8(v: &Vec4) -> u32 { +pub fn packSnorm4x8(v: &Vec4) -> u32 { unimplemented!() } -pub fn packUnorm2x16(v: &Vec2) -> u32 { +pub fn packUnorm2x16(v: &Vec2) -> u32 { unimplemented!() } -pub fn packUnorm4x8(v: &Vec4) -> u32 { +pub fn packUnorm4x8(v: &Vec4) -> u32 { unimplemented!() } -pub fn unpackDouble2x32(v: f64) -> 
UVec2 { +pub fn unpackDouble2x32(v: f64) -> UVec2 { unimplemented!() } -pub fn unpackHalf2x16(v: u32) -> Vec2 { +pub fn unpackHalf2x16(v: u32) -> Vec2 { unimplemented!() } -pub fn unpackSnorm2x16(p: u32) -> Vec2 { +pub fn unpackSnorm2x16(p: u32) -> Vec2 { unimplemented!() } -pub fn unpackSnorm4x8(p: u32) -> Vec4 { +pub fn unpackSnorm4x8(p: u32) -> Vec4 { unimplemented!() } -pub fn unpackUnorm2x16(p: u32) -> Vec2 { +pub fn unpackUnorm2x16(p: u32) -> Vec2 { unimplemented!() } -pub fn unpackUnorm4x8(p: u32) -> Vec4 { +pub fn unpackUnorm4x8(p: u32) -> Vec4 { unimplemented!() } diff --git a/nalgebra-glm/src/traits.rs b/nalgebra-glm/src/traits.rs index d338539d..ac3aa667 100644 --- a/nalgebra-glm/src/traits.rs +++ b/nalgebra-glm/src/traits.rs @@ -11,16 +11,16 @@ impl> Dimension for D {} /// A number that can either be an integer or a float. pub trait Number: - Scalar + Ring + Lattice + AbsDiffEq + Signed + FromPrimitive + Bounded + Scalar + Copy + Ring + Lattice + AbsDiffEq + Signed + FromPrimitive + Bounded { } -impl + Signed + FromPrimitive + Bounded> +impl + Signed + FromPrimitive + Bounded> Number for T {} #[doc(hidden)] -pub trait Alloc: +pub trait Alloc: Allocator + Allocator + Allocator @@ -50,7 +50,7 @@ pub trait Alloc: { } -impl Alloc for T where T: Allocator +impl Alloc for T where T: Allocator + Allocator + Allocator + Allocator diff --git a/src/base/array_storage.rs b/src/base/array_storage.rs index 38144d8a..17247356 100644 --- a/src/base/array_storage.rs +++ b/src/base/array_storage.rs @@ -253,7 +253,7 @@ where #[cfg(feature = "serde-serialize")] impl Serialize for ArrayStorage where - N: Scalar + Serialize, + N: Scalar + Copy + Serialize, R: DimName, C: DimName, R::Value: Mul, @@ -274,7 +274,7 @@ where #[cfg(feature = "serde-serialize")] impl<'a, N, R, C> Deserialize<'a> for ArrayStorage where - N: Scalar + Deserialize<'a>, + N: Scalar + Copy + Deserialize<'a>, R: DimName, C: DimName, R::Value: Mul, @@ -312,7 +312,7 @@ where #[cfg(feature = 
"serde-serialize")] impl<'a, N, R, C> Visitor<'a> for ArrayStorageVisitor where - N: Scalar + Deserialize<'a>, + N: Scalar + Copy + Deserialize<'a>, R: DimName, C: DimName, R::Value: Mul, diff --git a/src/base/construction.rs b/src/base/construction.rs index c28c043b..d7f1fef8 100644 --- a/src/base/construction.rs +++ b/src/base/construction.rs @@ -781,7 +781,7 @@ impl Arbitrary for MatrixMN where R: Dim, C: Dim, - N: Scalar + Arbitrary + Send, + N: Scalar + Copy + Arbitrary + Send, DefaultAllocator: Allocator, Owned: Clone + Send, { diff --git a/src/geometry/point_construction.rs b/src/geometry/point_construction.rs index 47d0e7e8..530c18a8 100644 --- a/src/geometry/point_construction.rs +++ b/src/geometry/point_construction.rs @@ -138,7 +138,7 @@ where } #[cfg(feature = "arbitrary")] -impl Arbitrary for Point +impl Arbitrary for Point where DefaultAllocator: Allocator, >::Buffer: Send, diff --git a/src/geometry/translation_construction.rs b/src/geometry/translation_construction.rs index 266b4b3b..370fbf63 100644 --- a/src/geometry/translation_construction.rs +++ b/src/geometry/translation_construction.rs @@ -59,7 +59,7 @@ where } #[cfg(feature = "arbitrary")] -impl Arbitrary for Translation +impl Arbitrary for Translation where DefaultAllocator: Allocator, Owned: Send, From 6bb355f4d0b9e364d8260695fa0221af96ab0ee6 Mon Sep 17 00:00:00 2001 From: Aaron Hill Date: Thu, 21 Nov 2019 17:22:00 -0500 Subject: [PATCH 42/67] Fix some out-of-bounds `offset` calls After we yield the final element from the iterator, we don't offset `ptr` agian, to avoid having it go out-of-bounds. However, `inner_end` may be several elements out-of-bounds, depending on the value of `size`. Therefore, we use `wrapping_offset` to avoid undefined behavior. 
--- src/base/iter.rs | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/src/base/iter.rs b/src/base/iter.rs index bad4e0be..65cdb20a 100644 --- a/src/base/iter.rs +++ b/src/base/iter.rs @@ -74,7 +74,12 @@ macro_rules! iterator { // Jump to the next outer dimension if needed. if self.ptr == self.inner_end { let stride = self.strides.1.value() as isize; - self.inner_end = self.ptr.offset(stride); + // This might go past the end of the allocation, + // depending on the value of 'size'. We use + // `wrapping_offset` to avoid UB + self.inner_end = self.ptr.wrapping_offset(stride); + // This will always be in bounds, since + // we're going to dereference it self.ptr = self.inner_ptr.offset(stride); self.inner_ptr = self.ptr; } @@ -83,8 +88,13 @@ macro_rules! iterator { let old = self.ptr; let stride = self.strides.0.value() as isize; - self.ptr = self.ptr.offset(stride); - + // Don't offset `self.ptr` for the last element, + // as this will be out of bounds. Iteration is done + // at this point (the next call to `next` will return `None`) + // so this is not observable. + if self.size != 0 { + self.ptr = self.ptr.offset(stride); + } Some(mem::transmute(old)) } } From 774f4da3e2b2556eaf1cf9fabd7ce59748a22d47 Mon Sep 17 00:00:00 2001 From: Avi Weinstock Date: Mon, 16 Dec 2019 18:09:14 -0500 Subject: [PATCH 43/67] Add `Clone` to `Scalar`, providing a default implementation of `inlined_clone`. Change `Scalar + Clone` bounds to just `Scalar`. 
--- examples/scalar_genericity.rs | 4 +- nalgebra-glm/src/common.rs | 6 +- nalgebra-glm/src/constructors.rs | 32 ++++----- nalgebra-glm/src/gtc/bitfield.rs | 10 +-- nalgebra-glm/src/gtc/integer.rs | 4 +- nalgebra-glm/src/gtc/matrix_access.rs | 8 +-- nalgebra-glm/src/gtc/packing.rs | 6 +- nalgebra-glm/src/gtc/round.rs | 18 ++--- nalgebra-glm/src/gtc/type_ptr.rs | 60 ++++++++-------- nalgebra-glm/src/gtc/ulp.rs | 2 +- nalgebra-glm/src/integer.rs | 20 +++--- nalgebra-glm/src/matrix.rs | 2 +- nalgebra-glm/src/packing.rs | 24 +++---- nalgebra-glm/src/traits.rs | 4 +- nalgebra-lapack/src/cholesky.rs | 2 +- nalgebra-lapack/src/eigen.rs | 4 +- nalgebra-lapack/src/hessenberg.rs | 2 +- nalgebra-lapack/src/lu.rs | 2 +- nalgebra-lapack/src/qr.rs | 2 +- nalgebra-lapack/src/schur.rs | 4 +- nalgebra-lapack/src/svd.rs | 4 +- nalgebra-lapack/src/symmetric_eigen.rs | 4 +- src/base/allocator.rs | 12 ++-- src/base/array_storage.rs | 16 ++--- src/base/blas.rs | 18 ++--- src/base/cg.rs | 6 +- src/base/componentwise.rs | 6 +- src/base/construction.rs | 24 +++---- src/base/construction_slice.rs | 16 ++--- src/base/conversion.rs | 54 +++++++------- src/base/coordinates.rs | 6 +- src/base/default_allocator.rs | 20 +++--- src/base/edition.rs | 24 +++---- src/base/indexing.rs | 18 ++--- src/base/iter.rs | 40 +++++------ src/base/matrix.rs | 90 ++++++++++++------------ src/base/matrix_alga.rs | 28 ++++---- src/base/matrix_slice.rs | 40 +++++------ src/base/ops.rs | 64 ++++++++--------- src/base/properties.rs | 2 +- src/base/scalar.rs | 8 +-- src/base/statistics.rs | 4 +- src/base/storage.rs | 8 +-- src/base/swizzle.rs | 2 +- src/base/vec_storage.rs | 18 ++--- src/debug/random_orthogonal.rs | 2 +- src/debug/random_sdp.rs | 2 +- src/geometry/op_macros.rs | 8 +-- src/geometry/perspective.rs | 2 +- src/geometry/point.rs | 26 +++---- src/geometry/point_alga.rs | 10 +-- src/geometry/point_construction.rs | 14 ++-- src/geometry/point_conversion.rs | 20 +++--- src/geometry/point_coordinates.rs | 4 +- 
src/geometry/point_ops.rs | 18 ++--- src/geometry/reflection.rs | 2 +- src/geometry/rotation.rs | 24 +++---- src/geometry/rotation_construction.rs | 4 +- src/geometry/rotation_ops.rs | 2 +- src/geometry/swizzle.rs | 2 +- src/geometry/translation.rs | 28 ++++---- src/geometry/translation_construction.rs | 10 +-- src/geometry/translation_conversion.rs | 8 +-- src/geometry/translation_coordinates.rs | 4 +- src/linalg/lu.rs | 4 +- src/linalg/permutation_sequence.rs | 8 +-- src/sparse/cs_matrix.rs | 28 ++++---- src/sparse/cs_matrix_conversion.rs | 8 +-- src/sparse/cs_matrix_ops.rs | 12 ++-- 69 files changed, 499 insertions(+), 499 deletions(-) diff --git a/examples/scalar_genericity.rs b/examples/scalar_genericity.rs index c1d363f7..75f6f9d4 100644 --- a/examples/scalar_genericity.rs +++ b/examples/scalar_genericity.rs @@ -4,11 +4,11 @@ extern crate nalgebra as na; use alga::general::{RealField, RingCommutative}; use na::{Scalar, Vector3}; -fn print_vector(m: &Vector3) { +fn print_vector(m: &Vector3) { println!("{:?}", m) } -fn print_squared_norm(v: &Vector3) { +fn print_squared_norm(v: &Vector3) { // NOTE: alternatively, nalgebra already defines `v.squared_norm()`. 
let sqnorm = v.dot(v); println!("{:?}", sqnorm); diff --git a/nalgebra-glm/src/common.rs b/nalgebra-glm/src/common.rs index 42f26d30..eda9f295 100644 --- a/nalgebra-glm/src/common.rs +++ b/nalgebra-glm/src/common.rs @@ -297,13 +297,13 @@ where DefaultAllocator: Alloc { v.map(int_bits_to_float) } -//pub fn isinf(x: &TVec) -> TVec +//pub fn isinf(x: &TVec) -> TVec // where DefaultAllocator: Alloc { // unimplemented!() // //} // -//pub fn isnan(x: &TVec) -> TVec +//pub fn isnan(x: &TVec) -> TVec // where DefaultAllocator: Alloc { // unimplemented!() // @@ -504,7 +504,7 @@ where DefaultAllocator: Alloc { x.map(|x| x.round()) } -//pub fn roundEven(x: &TVec) -> TVec +//pub fn roundEven(x: &TVec) -> TVec // where DefaultAllocator: Alloc { // unimplemented!() //} diff --git a/nalgebra-glm/src/constructors.rs b/nalgebra-glm/src/constructors.rs index 020bbcb4..949ea9e4 100644 --- a/nalgebra-glm/src/constructors.rs +++ b/nalgebra-glm/src/constructors.rs @@ -15,28 +15,28 @@ use crate::aliases::{TMat, Qua, TVec1, TVec2, TVec3, TVec4, TMat2, TMat2x3, TMat /// # use nalgebra_glm as glm; /// let v = glm::vec1(true); /// ``` -pub fn vec1(x: N) -> TVec1 { +pub fn vec1(x: N) -> TVec1 { TVec1::new(x) } /// Creates a new 2D vector. -pub fn vec2(x: N, y: N) -> TVec2 { +pub fn vec2(x: N, y: N) -> TVec2 { TVec2::new(x, y) } /// Creates a new 3D vector. -pub fn vec3(x: N, y: N, z: N) -> TVec3 { +pub fn vec3(x: N, y: N, z: N) -> TVec3 { TVec3::new(x, y, z) } /// Creates a new 4D vector. -pub fn vec4(x: N, y: N, z: N, w: N) -> TVec4 { +pub fn vec4(x: N, y: N, z: N, w: N) -> TVec4 { TVec4::new(x, y, z, w) } /// Create a new 2x2 matrix. -pub fn mat2(m11: N, m12: N, +pub fn mat2(m11: N, m12: N, m21: N, m22: N) -> TMat2 { TMat::::new( m11, m12, @@ -45,7 +45,7 @@ pub fn mat2(m11: N, m12: N, } /// Create a new 2x2 matrix. 
-pub fn mat2x2(m11: N, m12: N, +pub fn mat2x2(m11: N, m12: N, m21: N, m22: N) -> TMat2 { TMat::::new( m11, m12, @@ -54,7 +54,7 @@ pub fn mat2x2(m11: N, m12: N, } /// Create a new 2x3 matrix. -pub fn mat2x3(m11: N, m12: N, m13: N, +pub fn mat2x3(m11: N, m12: N, m13: N, m21: N, m22: N, m23: N) -> TMat2x3 { TMat::::new( m11, m12, m13, @@ -63,7 +63,7 @@ pub fn mat2x3(m11: N, m12: N, m13: N, } /// Create a new 2x4 matrix. -pub fn mat2x4(m11: N, m12: N, m13: N, m14: N, +pub fn mat2x4(m11: N, m12: N, m13: N, m14: N, m21: N, m22: N, m23: N, m24: N) -> TMat2x4 { TMat::::new( m11, m12, m13, m14, @@ -72,7 +72,7 @@ pub fn mat2x4(m11: N, m12: N, m13: N, m14: N, } /// Create a new 3x3 matrix. -pub fn mat3(m11: N, m12: N, m13: N, +pub fn mat3(m11: N, m12: N, m13: N, m21: N, m22: N, m23: N, m31: N, m32: N, m33: N) -> TMat3 { TMat::::new( @@ -83,7 +83,7 @@ pub fn mat3(m11: N, m12: N, m13: N, } /// Create a new 3x2 matrix. -pub fn mat3x2(m11: N, m12: N, +pub fn mat3x2(m11: N, m12: N, m21: N, m22: N, m31: N, m32: N) -> TMat3x2 { TMat::::new( @@ -94,7 +94,7 @@ pub fn mat3x2(m11: N, m12: N, } /// Create a new 3x3 matrix. -pub fn mat3x3(m11: N, m12: N, m13: N, +pub fn mat3x3(m11: N, m12: N, m13: N, m21: N, m22: N, m23: N, m31: N, m32: N, m33: N) -> TMat3 { TMat::::new( @@ -105,7 +105,7 @@ pub fn mat3x3(m11: N, m12: N, m13: N, } /// Create a new 3x4 matrix. -pub fn mat3x4(m11: N, m12: N, m13: N, m14: N, +pub fn mat3x4(m11: N, m12: N, m13: N, m14: N, m21: N, m22: N, m23: N, m24: N, m31: N, m32: N, m33: N, m34: N) -> TMat3x4 { TMat::::new( @@ -116,7 +116,7 @@ pub fn mat3x4(m11: N, m12: N, m13: N, m14: N, } /// Create a new 4x2 matrix. -pub fn mat4x2(m11: N, m12: N, +pub fn mat4x2(m11: N, m12: N, m21: N, m22: N, m31: N, m32: N, m41: N, m42: N) -> TMat4x2 { @@ -129,7 +129,7 @@ pub fn mat4x2(m11: N, m12: N, } /// Create a new 4x3 matrix. 
-pub fn mat4x3(m11: N, m12: N, m13: N, +pub fn mat4x3(m11: N, m12: N, m13: N, m21: N, m22: N, m23: N, m31: N, m32: N, m33: N, m41: N, m42: N, m43: N) -> TMat4x3 { @@ -142,7 +142,7 @@ pub fn mat4x3(m11: N, m12: N, m13: N, } /// Create a new 4x4 matrix. -pub fn mat4x4(m11: N, m12: N, m13: N, m14: N, +pub fn mat4x4(m11: N, m12: N, m13: N, m14: N, m21: N, m22: N, m23: N, m24: N, m31: N, m32: N, m33: N, m34: N, m41: N, m42: N, m43: N, m44: N) -> TMat4 { @@ -155,7 +155,7 @@ pub fn mat4x4(m11: N, m12: N, m13: N, m14: N, } /// Create a new 4x4 matrix. -pub fn mat4(m11: N, m12: N, m13: N, m14: N, +pub fn mat4(m11: N, m12: N, m13: N, m14: N, m21: N, m22: N, m23: N, m24: N, m31: N, m32: N, m33: N, m34: N, m41: N, m42: N, m43: N, m44: N) -> TMat4 { diff --git a/nalgebra-glm/src/gtc/bitfield.rs b/nalgebra-glm/src/gtc/bitfield.rs index 81a1646f..bdf18552 100644 --- a/nalgebra-glm/src/gtc/bitfield.rs +++ b/nalgebra-glm/src/gtc/bitfield.rs @@ -19,7 +19,7 @@ pub fn bitfieldFillOne(Value: IU, FirstBit: i32, BitCount: i32) -> IU { unimplemented!() } -pub fn bitfieldFillOne2(Value: &TVec, FirstBit: i32, BitCount: i32) -> TVec +pub fn bitfieldFillOne2(Value: &TVec, FirstBit: i32, BitCount: i32) -> TVec where DefaultAllocator: Alloc { unimplemented!() } @@ -28,7 +28,7 @@ pub fn bitfieldFillZero(Value: IU, FirstBit: i32, BitCount: i32) -> IU { unimplemented!() } -pub fn bitfieldFillZero2(Value: &TVec, FirstBit: i32, BitCount: i32) -> TVec +pub fn bitfieldFillZero2(Value: &TVec, FirstBit: i32, BitCount: i32) -> TVec where DefaultAllocator: Alloc { unimplemented!() } @@ -113,7 +113,7 @@ pub fn bitfieldRotateLeft(In: IU, Shift: i32) -> IU { unimplemented!() } -pub fn bitfieldRotateLeft2(In: &TVec, Shift: i32) -> TVec +pub fn bitfieldRotateLeft2(In: &TVec, Shift: i32) -> TVec where DefaultAllocator: Alloc { unimplemented!() } @@ -122,7 +122,7 @@ pub fn bitfieldRotateRight(In: IU, Shift: i32) -> IU { unimplemented!() } -pub fn bitfieldRotateRight2(In: &TVec, Shift: i32) -> TVec +pub fn 
bitfieldRotateRight2(In: &TVec, Shift: i32) -> TVec where DefaultAllocator: Alloc { unimplemented!() } @@ -131,7 +131,7 @@ pub fn mask(Bits: IU) -> IU { unimplemented!() } -pub fn mask2(v: &TVec) -> TVec +pub fn mask2(v: &TVec) -> TVec where DefaultAllocator: Alloc { unimplemented!() } diff --git a/nalgebra-glm/src/gtc/integer.rs b/nalgebra-glm/src/gtc/integer.rs index d5965f38..a972a4ab 100644 --- a/nalgebra-glm/src/gtc/integer.rs +++ b/nalgebra-glm/src/gtc/integer.rs @@ -3,7 +3,7 @@ //use crate::traits::{Alloc, Dimension}; //use crate::aliases::TVec; -//pub fn iround(x: &TVec) -> TVec +//pub fn iround(x: &TVec) -> TVec // where DefaultAllocator: Alloc { // x.map(|x| x.round()) //} @@ -12,7 +12,7 @@ // unimplemented!() //} // -//pub fn uround(x: &TVec) -> TVec +//pub fn uround(x: &TVec) -> TVec // where DefaultAllocator: Alloc { // unimplemented!() //} diff --git a/nalgebra-glm/src/gtc/matrix_access.rs b/nalgebra-glm/src/gtc/matrix_access.rs index bfc0199b..f61d9782 100644 --- a/nalgebra-glm/src/gtc/matrix_access.rs +++ b/nalgebra-glm/src/gtc/matrix_access.rs @@ -10,7 +10,7 @@ use crate::traits::{Alloc, Dimension}; /// * [`row`](fn.row.html) /// * [`set_column`](fn.set_column.html) /// * [`set_row`](fn.set_row.html) -pub fn column( +pub fn column( m: &TMat, index: usize, ) -> TVec @@ -27,7 +27,7 @@ where /// * [`column`](fn.column.html) /// * [`row`](fn.row.html) /// * [`set_row`](fn.set_row.html) -pub fn set_column( +pub fn set_column( m: &TMat, index: usize, x: &TVec, @@ -47,7 +47,7 @@ where /// * [`column`](fn.column.html) /// * [`set_column`](fn.set_column.html) /// * [`set_row`](fn.set_row.html) -pub fn row(m: &TMat, index: usize) -> TVec +pub fn row(m: &TMat, index: usize) -> TVec where DefaultAllocator: Alloc { m.row(index).into_owned().transpose() } @@ -59,7 +59,7 @@ where DefaultAllocator: Alloc { /// * [`column`](fn.column.html) /// * [`row`](fn.row.html) /// * [`set_column`](fn.set_column.html) -pub fn set_row( +pub fn set_row( m: &TMat, index: usize, 
x: &TVec, diff --git a/nalgebra-glm/src/gtc/packing.rs b/nalgebra-glm/src/gtc/packing.rs index 5d649538..ea5acac4 100644 --- a/nalgebra-glm/src/gtc/packing.rs +++ b/nalgebra-glm/src/gtc/packing.rs @@ -49,7 +49,7 @@ pub fn packInt4x8(v: &I8Vec4) -> i32 { unimplemented!() } -pub fn packRGBM(rgb: &TVec3) -> TVec4 { +pub fn packRGBM(rgb: &TVec3) -> TVec4 { unimplemented!() } @@ -155,7 +155,7 @@ pub fn unpackF3x9_E1x5(p: i32) -> Vec3 { unimplemented!() } -pub fn unpackHalf(p: TVec) -> TVec +pub fn unpackHalf(p: TVec) -> TVec where DefaultAllocator: Alloc { unimplemented!() } @@ -192,7 +192,7 @@ pub fn unpackInt4x8(p: i32) -> I8Vec4 { unimplemented!() } -pub fn unpackRGBM(rgbm: &TVec4) -> TVec3 { +pub fn unpackRGBM(rgbm: &TVec4) -> TVec3 { unimplemented!() } diff --git a/nalgebra-glm/src/gtc/round.rs b/nalgebra-glm/src/gtc/round.rs index 21d3a21e..5ad95780 100644 --- a/nalgebra-glm/src/gtc/round.rs +++ b/nalgebra-glm/src/gtc/round.rs @@ -8,7 +8,7 @@ pub fn ceilMultiple(v: T, Multiple: T) -> T { unimplemented!() } -pub fn ceilMultiple2(v: &TVec, Multiple: &TVec) -> TVec +pub fn ceilMultiple2(v: &TVec, Multiple: &TVec) -> TVec where DefaultAllocator: Alloc { unimplemented!() } @@ -17,7 +17,7 @@ pub fn ceilPowerOfTwo(v: IU) -> IU { unimplemented!() } -pub fn ceilPowerOfTwo2(v: &TVec) -> TVec +pub fn ceilPowerOfTwo2(v: &TVec) -> TVec where DefaultAllocator: Alloc { unimplemented!() } @@ -26,7 +26,7 @@ pub fn floorMultiple(v: T, Multiple: T) -> T { unimplemented!() } -pub fn floorMultiple2(v: &TVec, Multiple: &TVec) -> TVec +pub fn floorMultiple2(v: &TVec, Multiple: &TVec) -> TVec where DefaultAllocator: Alloc { unimplemented!() } @@ -35,7 +35,7 @@ pub fn floorPowerOfTwo(v: IU) -> IU { unimplemented!() } -pub fn floorPowerOfTwo2(v: &TVec) -> TVec +pub fn floorPowerOfTwo2(v: &TVec) -> TVec where DefaultAllocator: Alloc { unimplemented!() } @@ -44,12 +44,12 @@ pub fn isMultiple(v: IU, Multiple: IU) -> bool { unimplemented!() } -pub fn isMultiple2(v: &TVec,Multiple: N) -> TVec 
+pub fn isMultiple2(v: &TVec,Multiple: N) -> TVec where DefaultAllocator: Alloc { unimplemented!() } -pub fn isMultiple3(v: &TVec, Multiple: &TVec) -> TVec +pub fn isMultiple3(v: &TVec, Multiple: &TVec) -> TVec where DefaultAllocator: Alloc { unimplemented!() } @@ -58,7 +58,7 @@ pub fn isPowerOfTwo2(v: IU) -> bool { unimplemented!() } -pub fn isPowerOfTwo(v: &TVec) -> TVec +pub fn isPowerOfTwo(v: &TVec) -> TVec where DefaultAllocator: Alloc { unimplemented!() } @@ -67,7 +67,7 @@ pub fn roundMultiple(v: T, Multiple: T) -> T { unimplemented!() } -pub fn roundMultiple2(v: &TVec, Multiple: &TVec) -> TVec +pub fn roundMultiple2(v: &TVec, Multiple: &TVec) -> TVec where DefaultAllocator: Alloc { unimplemented!() } @@ -76,7 +76,7 @@ pub fn roundPowerOfTwo(v: IU) -> IU { unimplemented!() } -pub fn roundPowerOfTwo2(v: &TVec) -> TVec +pub fn roundPowerOfTwo2(v: &TVec) -> TVec where DefaultAllocator: Alloc { unimplemented!() } diff --git a/nalgebra-glm/src/gtc/type_ptr.rs b/nalgebra-glm/src/gtc/type_ptr.rs index ad9e6f8f..6de096ca 100644 --- a/nalgebra-glm/src/gtc/type_ptr.rs +++ b/nalgebra-glm/src/gtc/type_ptr.rs @@ -7,62 +7,62 @@ use crate::aliases::{ use crate::traits::{Alloc, Dimension, Number}; /// Creates a 2x2 matrix from a slice arranged in column-major order. -pub fn make_mat2(ptr: &[N]) -> TMat2 { +pub fn make_mat2(ptr: &[N]) -> TMat2 { TMat2::from_column_slice(ptr) } /// Creates a 2x2 matrix from a slice arranged in column-major order. -pub fn make_mat2x2(ptr: &[N]) -> TMat2 { +pub fn make_mat2x2(ptr: &[N]) -> TMat2 { TMat2::from_column_slice(ptr) } /// Creates a 2x3 matrix from a slice arranged in column-major order. -pub fn make_mat2x3(ptr: &[N]) -> TMat2x3 { +pub fn make_mat2x3(ptr: &[N]) -> TMat2x3 { TMat2x3::from_column_slice(ptr) } /// Creates a 2x4 matrix from a slice arranged in column-major order. 
-pub fn make_mat2x4(ptr: &[N]) -> TMat2x4 { +pub fn make_mat2x4(ptr: &[N]) -> TMat2x4 { TMat2x4::from_column_slice(ptr) } /// Creates a 3 matrix from a slice arranged in column-major order. -pub fn make_mat3(ptr: &[N]) -> TMat3 { +pub fn make_mat3(ptr: &[N]) -> TMat3 { TMat3::from_column_slice(ptr) } /// Creates a 3x2 matrix from a slice arranged in column-major order. -pub fn make_mat3x2(ptr: &[N]) -> TMat3x2 { +pub fn make_mat3x2(ptr: &[N]) -> TMat3x2 { TMat3x2::from_column_slice(ptr) } /// Creates a 3x3 matrix from a slice arranged in column-major order. -pub fn make_mat3x3(ptr: &[N]) -> TMat3 { +pub fn make_mat3x3(ptr: &[N]) -> TMat3 { TMat3::from_column_slice(ptr) } /// Creates a 3x4 matrix from a slice arranged in column-major order. -pub fn make_mat3x4(ptr: &[N]) -> TMat3x4 { +pub fn make_mat3x4(ptr: &[N]) -> TMat3x4 { TMat3x4::from_column_slice(ptr) } /// Creates a 4x4 matrix from a slice arranged in column-major order. -pub fn make_mat4(ptr: &[N]) -> TMat4 { +pub fn make_mat4(ptr: &[N]) -> TMat4 { TMat4::from_column_slice(ptr) } /// Creates a 4x2 matrix from a slice arranged in column-major order. -pub fn make_mat4x2(ptr: &[N]) -> TMat4x2 { +pub fn make_mat4x2(ptr: &[N]) -> TMat4x2 { TMat4x2::from_column_slice(ptr) } /// Creates a 4x3 matrix from a slice arranged in column-major order. -pub fn make_mat4x3(ptr: &[N]) -> TMat4x3 { +pub fn make_mat4x3(ptr: &[N]) -> TMat4x3 { TMat4x3::from_column_slice(ptr) } /// Creates a 4x4 matrix from a slice arranged in column-major order. -pub fn make_mat4x4(ptr: &[N]) -> TMat4 { +pub fn make_mat4x4(ptr: &[N]) -> TMat4 { TMat4::from_column_slice(ptr) } @@ -75,7 +75,7 @@ pub fn mat2_to_mat3(m: &TMat2) -> TMat3 { } /// Converts a 3x3 matrix to a 2x2 matrix. 
-pub fn mat3_to_mat2(m: &TMat3) -> TMat2 { +pub fn mat3_to_mat2(m: &TMat3) -> TMat2 { TMat2::new(m.m11.inlined_clone(), m.m12.inlined_clone(), m.m21.inlined_clone(), m.m22.inlined_clone()) } @@ -90,7 +90,7 @@ pub fn mat3_to_mat4(m: &TMat3) -> TMat4 { } /// Converts a 4x4 matrix to a 3x3 matrix. -pub fn mat4_to_mat3(m: &TMat4) -> TMat3 { +pub fn mat4_to_mat3(m: &TMat4) -> TMat3 { TMat3::new( m.m11.inlined_clone(), m.m12.inlined_clone(), m.m13.inlined_clone(), m.m21.inlined_clone(), m.m22.inlined_clone(), m.m23.inlined_clone(), @@ -109,7 +109,7 @@ pub fn mat2_to_mat4(m: &TMat2) -> TMat4 { } /// Converts a 4x4 matrix to a 2x2 matrix. -pub fn mat4_to_mat2(m: &TMat4) -> TMat2 { +pub fn mat4_to_mat2(m: &TMat4) -> TMat2 { TMat2::new(m.m11.inlined_clone(), m.m12.inlined_clone(), m.m21.inlined_clone(), m.m22.inlined_clone()) } @@ -125,7 +125,7 @@ pub fn make_quat(ptr: &[N]) -> Qua { /// * [`make_vec2`](fn.make_vec2.html) /// * [`make_vec3`](fn.make_vec3.html) /// * [`make_vec4`](fn.make_vec4.html) -pub fn make_vec1(v: &TVec1) -> TVec1 { +pub fn make_vec1(v: &TVec1) -> TVec1 { v.clone() } @@ -139,7 +139,7 @@ pub fn make_vec1(v: &TVec1) -> TVec1 { /// * [`vec1_to_vec2`](fn.vec1_to_vec2.html) /// * [`vec1_to_vec3`](fn.vec1_to_vec3.html) /// * [`vec1_to_vec4`](fn.vec1_to_vec4.html) -pub fn vec2_to_vec1(v: &TVec2) -> TVec1 { +pub fn vec2_to_vec1(v: &TVec2) -> TVec1 { TVec1::new(v.x.inlined_clone()) } @@ -153,7 +153,7 @@ pub fn vec2_to_vec1(v: &TVec2) -> TVec1 { /// * [`vec1_to_vec2`](fn.vec1_to_vec2.html) /// * [`vec1_to_vec3`](fn.vec1_to_vec3.html) /// * [`vec1_to_vec4`](fn.vec1_to_vec4.html) -pub fn vec3_to_vec1(v: &TVec3) -> TVec1 { +pub fn vec3_to_vec1(v: &TVec3) -> TVec1 { TVec1::new(v.x.inlined_clone()) } @@ -167,7 +167,7 @@ pub fn vec3_to_vec1(v: &TVec3) -> TVec1 { /// * [`vec1_to_vec2`](fn.vec1_to_vec2.html) /// * [`vec1_to_vec3`](fn.vec1_to_vec3.html) /// * [`vec1_to_vec4`](fn.vec1_to_vec4.html) -pub fn vec4_to_vec1(v: &TVec4) -> TVec1 { +pub fn vec4_to_vec1(v: &TVec4) 
-> TVec1 { TVec1::new(v.x.inlined_clone()) } @@ -198,7 +198,7 @@ pub fn vec1_to_vec2(v: &TVec1) -> TVec2 { /// * [`vec2_to_vec2`](fn.vec2_to_vec2.html) /// * [`vec2_to_vec3`](fn.vec2_to_vec3.html) /// * [`vec2_to_vec4`](fn.vec2_to_vec4.html) -pub fn vec2_to_vec2(v: &TVec2) -> TVec2 { +pub fn vec2_to_vec2(v: &TVec2) -> TVec2 { v.clone() } @@ -212,7 +212,7 @@ pub fn vec2_to_vec2(v: &TVec2) -> TVec2 { /// * [`vec2_to_vec2`](fn.vec2_to_vec2.html) /// * [`vec2_to_vec3`](fn.vec2_to_vec3.html) /// * [`vec2_to_vec4`](fn.vec2_to_vec4.html) -pub fn vec3_to_vec2(v: &TVec3) -> TVec2 { +pub fn vec3_to_vec2(v: &TVec3) -> TVec2 { TVec2::new(v.x.inlined_clone(), v.y.inlined_clone()) } @@ -226,7 +226,7 @@ pub fn vec3_to_vec2(v: &TVec3) -> TVec2 { /// * [`vec2_to_vec2`](fn.vec2_to_vec2.html) /// * [`vec2_to_vec3`](fn.vec2_to_vec3.html) /// * [`vec2_to_vec4`](fn.vec2_to_vec4.html) -pub fn vec4_to_vec2(v: &TVec4) -> TVec2 { +pub fn vec4_to_vec2(v: &TVec4) -> TVec2 { TVec2::new(v.x.inlined_clone(), v.y.inlined_clone()) } @@ -237,7 +237,7 @@ pub fn vec4_to_vec2(v: &TVec4) -> TVec2 { /// * [`make_vec1`](fn.make_vec1.html) /// * [`make_vec3`](fn.make_vec3.html) /// * [`make_vec4`](fn.make_vec4.html) -pub fn make_vec2(ptr: &[N]) -> TVec2 { +pub fn make_vec2(ptr: &[N]) -> TVec2 { TVec2::from_column_slice(ptr) } @@ -282,7 +282,7 @@ pub fn vec2_to_vec3(v: &TVec2) -> TVec3 { /// * [`vec3_to_vec1`](fn.vec3_to_vec1.html) /// * [`vec3_to_vec2`](fn.vec3_to_vec2.html) /// * [`vec3_to_vec4`](fn.vec3_to_vec4.html) -pub fn vec3_to_vec3(v: &TVec3) -> TVec3 { +pub fn vec3_to_vec3(v: &TVec3) -> TVec3 { v.clone() } @@ -296,7 +296,7 @@ pub fn vec3_to_vec3(v: &TVec3) -> TVec3 { /// * [`vec3_to_vec1`](fn.vec3_to_vec1.html) /// * [`vec3_to_vec2`](fn.vec3_to_vec2.html) /// * [`vec3_to_vec4`](fn.vec3_to_vec4.html) -pub fn vec4_to_vec3(v: &TVec4) -> TVec3 { +pub fn vec4_to_vec3(v: &TVec4) -> TVec3 { TVec3::new(v.x.inlined_clone(), v.y.inlined_clone(), v.z.inlined_clone()) } @@ -307,7 +307,7 @@ pub fn 
vec4_to_vec3(v: &TVec4) -> TVec3 { /// * [`make_vec1`](fn.make_vec1.html) /// * [`make_vec2`](fn.make_vec2.html) /// * [`make_vec4`](fn.make_vec4.html) -pub fn make_vec3(ptr: &[N]) -> TVec3 { +pub fn make_vec3(ptr: &[N]) -> TVec3 { TVec3::from_column_slice(ptr) } @@ -369,7 +369,7 @@ pub fn vec3_to_vec4(v: &TVec3) -> TVec4 { /// * [`vec4_to_vec1`](fn.vec4_to_vec1.html) /// * [`vec4_to_vec2`](fn.vec4_to_vec2.html) /// * [`vec4_to_vec3`](fn.vec4_to_vec3.html) -pub fn vec4_to_vec4(v: &TVec4) -> TVec4 { +pub fn vec4_to_vec4(v: &TVec4) -> TVec4 { v.clone() } @@ -380,18 +380,18 @@ pub fn vec4_to_vec4(v: &TVec4) -> TVec4 { /// * [`make_vec1`](fn.make_vec1.html) /// * [`make_vec2`](fn.make_vec2.html) /// * [`make_vec3`](fn.make_vec3.html) -pub fn make_vec4(ptr: &[N]) -> TVec4 { +pub fn make_vec4(ptr: &[N]) -> TVec4 { TVec4::from_column_slice(ptr) } /// Converts a matrix or vector to a slice arranged in column-major order. -pub fn value_ptr(x: &TMat) -> &[N] +pub fn value_ptr(x: &TMat) -> &[N] where DefaultAllocator: Alloc { x.as_slice() } /// Converts a matrix or vector to a mutable slice arranged in column-major order. 
-pub fn value_ptr_mut(x: &mut TMat) -> &mut [N] +pub fn value_ptr_mut(x: &mut TMat) -> &mut [N] where DefaultAllocator: Alloc { x.as_mut_slice() } diff --git a/nalgebra-glm/src/gtc/ulp.rs b/nalgebra-glm/src/gtc/ulp.rs index 07116b15..8258d0df 100644 --- a/nalgebra-glm/src/gtc/ulp.rs +++ b/nalgebra-glm/src/gtc/ulp.rs @@ -7,7 +7,7 @@ pub fn float_distance(x: T, y: T) -> u64 { unimplemented!() } -pub fn float_distance2(x: &TVec2, y: &TVec2) -> TVec { +pub fn float_distance2(x: &TVec2, y: &TVec2) -> TVec { unimplemented!() } diff --git a/nalgebra-glm/src/integer.rs b/nalgebra-glm/src/integer.rs index 73cae447..198d737a 100644 --- a/nalgebra-glm/src/integer.rs +++ b/nalgebra-glm/src/integer.rs @@ -7,22 +7,22 @@ pub fn bitCount(v: T) -> i32 { unimplemented!() } -pub fn bitCount2(v: &TVec) -> TVec +pub fn bitCount2(v: &TVec) -> TVec where DefaultAllocator: Alloc { unimplemented!() } -pub fn bitfieldExtract(Value: &TVec, Offset: i32, Bits: i32) -> TVec +pub fn bitfieldExtract(Value: &TVec, Offset: i32, Bits: i32) -> TVec where DefaultAllocator: Alloc { unimplemented!() } -pub fn bitfieldInsert(Base: &TVec, Insert: &TVec, Offset: i32, Bits: i32) -> TVec +pub fn bitfieldInsert(Base: &TVec, Insert: &TVec, Offset: i32, Bits: i32) -> TVec where DefaultAllocator: Alloc { unimplemented!() } -pub fn bitfieldReverse(v: &TVec) -> TVec +pub fn bitfieldReverse(v: &TVec) -> TVec where DefaultAllocator: Alloc { unimplemented!() } @@ -31,7 +31,7 @@ pub fn findLSB(x: IU) -> u32 { unimplemented!() } -pub fn findLSB2(v: &TVec) -> TVec +pub fn findLSB2(v: &TVec) -> TVec where DefaultAllocator: Alloc { unimplemented!() } @@ -40,27 +40,27 @@ pub fn findMSB(x: IU) -> i32 { unimplemented!() } -pub fn findMSB2(v: &TVec) -> TVec +pub fn findMSB2(v: &TVec) -> TVec where DefaultAllocator: Alloc { unimplemented!() } -pub fn imulExtended(x: &TVec, y: &TVec, msb: &TVec, lsb: &TVec) +pub fn imulExtended(x: &TVec, y: &TVec, msb: &TVec, lsb: &TVec) where DefaultAllocator: Alloc { unimplemented!() } -pub 
fn uaddCarry(x: &TVec, y: &TVec, carry: &TVec) -> TVec +pub fn uaddCarry(x: &TVec, y: &TVec, carry: &TVec) -> TVec where DefaultAllocator: Alloc { unimplemented!() } -pub fn umulExtended(x: &TVec, y: &TVec, msb: &TVec, lsb: &TVec) +pub fn umulExtended(x: &TVec, y: &TVec, msb: &TVec, lsb: &TVec) where DefaultAllocator: Alloc { unimplemented!() } -pub fn usubBorrow(x: &TVec, y: &TVec, borrow: &TVec) -> TVec +pub fn usubBorrow(x: &TVec, y: &TVec, borrow: &TVec) -> TVec where DefaultAllocator: Alloc { unimplemented!() } diff --git a/nalgebra-glm/src/matrix.rs b/nalgebra-glm/src/matrix.rs index c9c177b4..a4c4efe9 100644 --- a/nalgebra-glm/src/matrix.rs +++ b/nalgebra-glm/src/matrix.rs @@ -40,7 +40,7 @@ where } /// The transpose of the matrix `m`. -pub fn transpose(x: &TMat) -> TMat +pub fn transpose(x: &TMat) -> TMat where DefaultAllocator: Alloc { x.transpose() } diff --git a/nalgebra-glm/src/packing.rs b/nalgebra-glm/src/packing.rs index e0169cbe..3273fb26 100644 --- a/nalgebra-glm/src/packing.rs +++ b/nalgebra-glm/src/packing.rs @@ -3,50 +3,50 @@ use na::Scalar; use crate::aliases::{Vec2, Vec4, UVec2}; -pub fn packDouble2x32(v: &UVec2) -> f64 { +pub fn packDouble2x32(v: &UVec2) -> f64 { unimplemented!() } -pub fn packHalf2x16(v: &Vec2) -> u32 { +pub fn packHalf2x16(v: &Vec2) -> u32 { unimplemented!() } -pub fn packSnorm2x16(v: &Vec2) -> u32 { +pub fn packSnorm2x16(v: &Vec2) -> u32 { unimplemented!() } -pub fn packSnorm4x8(v: &Vec4) -> u32 { +pub fn packSnorm4x8(v: &Vec4) -> u32 { unimplemented!() } -pub fn packUnorm2x16(v: &Vec2) -> u32 { +pub fn packUnorm2x16(v: &Vec2) -> u32 { unimplemented!() } -pub fn packUnorm4x8(v: &Vec4) -> u32 { +pub fn packUnorm4x8(v: &Vec4) -> u32 { unimplemented!() } -pub fn unpackDouble2x32(v: f64) -> UVec2 { +pub fn unpackDouble2x32(v: f64) -> UVec2 { unimplemented!() } -pub fn unpackHalf2x16(v: u32) -> Vec2 { +pub fn unpackHalf2x16(v: u32) -> Vec2 { unimplemented!() } -pub fn unpackSnorm2x16(p: u32) -> Vec2 { +pub fn unpackSnorm2x16(p: 
u32) -> Vec2 { unimplemented!() } -pub fn unpackSnorm4x8(p: u32) -> Vec4 { +pub fn unpackSnorm4x8(p: u32) -> Vec4 { unimplemented!() } -pub fn unpackUnorm2x16(p: u32) -> Vec2 { +pub fn unpackUnorm2x16(p: u32) -> Vec2 { unimplemented!() } -pub fn unpackUnorm4x8(p: u32) -> Vec4 { +pub fn unpackUnorm4x8(p: u32) -> Vec4 { unimplemented!() } diff --git a/nalgebra-glm/src/traits.rs b/nalgebra-glm/src/traits.rs index 15efb72b..22fd5581 100644 --- a/nalgebra-glm/src/traits.rs +++ b/nalgebra-glm/src/traits.rs @@ -20,7 +20,7 @@ impl + Signed + Fr {} #[doc(hidden)] -pub trait Alloc: +pub trait Alloc: Allocator + Allocator + Allocator @@ -50,7 +50,7 @@ pub trait Alloc: { } -impl Alloc for T where T: Allocator +impl Alloc for T where T: Allocator + Allocator + Allocator + Allocator diff --git a/nalgebra-lapack/src/cholesky.rs b/nalgebra-lapack/src/cholesky.rs index 7dce3a13..e25223fa 100644 --- a/nalgebra-lapack/src/cholesky.rs +++ b/nalgebra-lapack/src/cholesky.rs @@ -28,7 +28,7 @@ use lapack; )) )] #[derive(Clone, Debug)] -pub struct Cholesky +pub struct Cholesky where DefaultAllocator: Allocator { l: MatrixN, diff --git a/nalgebra-lapack/src/eigen.rs b/nalgebra-lapack/src/eigen.rs index c645e228..91bc01ac 100644 --- a/nalgebra-lapack/src/eigen.rs +++ b/nalgebra-lapack/src/eigen.rs @@ -33,7 +33,7 @@ use lapack; )) )] #[derive(Clone, Debug)] -pub struct Eigen +pub struct Eigen where DefaultAllocator: Allocator + Allocator { /// The eigenvalues of the decomposed matrix. @@ -311,7 +311,7 @@ where DefaultAllocator: Allocator + Allocator */ /// Trait implemented by scalar type for which Lapack function exist to compute the /// eigendecomposition. 
-pub trait EigenScalar: Scalar + Clone { +pub trait EigenScalar: Scalar { #[allow(missing_docs)] fn xgeev( jobvl: u8, diff --git a/nalgebra-lapack/src/hessenberg.rs b/nalgebra-lapack/src/hessenberg.rs index 92ec6cda..81e72966 100644 --- a/nalgebra-lapack/src/hessenberg.rs +++ b/nalgebra-lapack/src/hessenberg.rs @@ -30,7 +30,7 @@ use lapack; )) )] #[derive(Clone, Debug)] -pub struct Hessenberg> +pub struct Hessenberg> where DefaultAllocator: Allocator + Allocator> { h: MatrixN, diff --git a/nalgebra-lapack/src/lu.rs b/nalgebra-lapack/src/lu.rs index a636a722..fb4296da 100644 --- a/nalgebra-lapack/src/lu.rs +++ b/nalgebra-lapack/src/lu.rs @@ -37,7 +37,7 @@ use lapack; )) )] #[derive(Clone, Debug)] -pub struct LU, C: Dim> +pub struct LU, C: Dim> where DefaultAllocator: Allocator> + Allocator { lu: MatrixMN, diff --git a/nalgebra-lapack/src/qr.rs b/nalgebra-lapack/src/qr.rs index 7aa10cb5..d9d28910 100644 --- a/nalgebra-lapack/src/qr.rs +++ b/nalgebra-lapack/src/qr.rs @@ -33,7 +33,7 @@ use lapack; )) )] #[derive(Clone, Debug)] -pub struct QR, C: Dim> +pub struct QR, C: Dim> where DefaultAllocator: Allocator + Allocator> { qr: MatrixMN, diff --git a/nalgebra-lapack/src/schur.rs b/nalgebra-lapack/src/schur.rs index 69d0a29a..0592acdf 100644 --- a/nalgebra-lapack/src/schur.rs +++ b/nalgebra-lapack/src/schur.rs @@ -33,7 +33,7 @@ use lapack; )) )] #[derive(Clone, Debug)] -pub struct Schur +pub struct Schur where DefaultAllocator: Allocator + Allocator { re: VectorN, @@ -162,7 +162,7 @@ where DefaultAllocator: Allocator + Allocator * */ /// Trait implemented by scalars for which Lapack implements the RealField Schur decomposition. 
-pub trait SchurScalar: Scalar + Clone { +pub trait SchurScalar: Scalar { #[allow(missing_docs)] fn xgees( jobvs: u8, diff --git a/nalgebra-lapack/src/svd.rs b/nalgebra-lapack/src/svd.rs index ac77fba1..43bb1c20 100644 --- a/nalgebra-lapack/src/svd.rs +++ b/nalgebra-lapack/src/svd.rs @@ -36,7 +36,7 @@ use lapack; )) )] #[derive(Clone, Debug)] -pub struct SVD, C: Dim> +pub struct SVD, C: Dim> where DefaultAllocator: Allocator + Allocator> + Allocator { /// The left-singular vectors `U` of this SVD. @@ -57,7 +57,7 @@ where /// Trait implemented by floats (`f32`, `f64`) and complex floats (`Complex`, `Complex`) /// supported by the Singular Value Decompotition. -pub trait SVDScalar, C: Dim>: Scalar + Clone +pub trait SVDScalar, C: Dim>: Scalar where DefaultAllocator: Allocator + Allocator + Allocator> diff --git a/nalgebra-lapack/src/symmetric_eigen.rs b/nalgebra-lapack/src/symmetric_eigen.rs index eccdf0f7..e575fdc1 100644 --- a/nalgebra-lapack/src/symmetric_eigen.rs +++ b/nalgebra-lapack/src/symmetric_eigen.rs @@ -35,7 +35,7 @@ use lapack; )) )] #[derive(Clone, Debug)] -pub struct SymmetricEigen +pub struct SymmetricEigen where DefaultAllocator: Allocator + Allocator { /// The eigenvectors of the decomposed matrix. @@ -169,7 +169,7 @@ where DefaultAllocator: Allocator + Allocator */ /// Trait implemented by scalars for which Lapack implements the eigendecomposition of symmetric /// real matrices. -pub trait SymmetricEigenScalar: Scalar + Clone { +pub trait SymmetricEigenScalar: Scalar { #[allow(missing_docs)] fn xsyev( jobz: u8, diff --git a/src/base/allocator.rs b/src/base/allocator.rs index 3625b059..0ad30981 100644 --- a/src/base/allocator.rs +++ b/src/base/allocator.rs @@ -16,7 +16,7 @@ use crate::base::{DefaultAllocator, Scalar}; /// /// Every allocator must be both static and dynamic. Though not all implementations may share the /// same `Buffer` type. 
-pub trait Allocator: Any + Sized { +pub trait Allocator: Any + Sized { /// The type of buffer this allocator can instanciate. type Buffer: ContiguousStorageMut + Clone; @@ -33,7 +33,7 @@ pub trait Allocator: Any + Sized { /// A matrix reallocator. Changes the size of the memory buffer that initially contains (RFrom × /// CFrom) elements to a smaller or larger size (RTo, CTo). -pub trait Reallocator: +pub trait Reallocator: Allocator + Allocator { /// Reallocates a buffer of shape `(RTo, CTo)`, possibly reusing a previously allocated buffer @@ -65,7 +65,7 @@ where R2: Dim, C1: Dim, C2: Dim, - N: Scalar + Clone, + N: Scalar, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { } @@ -76,7 +76,7 @@ where R2: Dim, C1: Dim, C2: Dim, - N: Scalar + Clone, + N: Scalar, DefaultAllocator: Allocator + Allocator, SameShapeC>, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, {} @@ -88,7 +88,7 @@ pub trait SameShapeVectorAllocator: where R1: Dim, R2: Dim, - N: Scalar + Clone, + N: Scalar, ShapeConstraint: SameNumberOfRows, { } @@ -97,7 +97,7 @@ impl SameShapeVectorAllocator for DefaultAllocator where R1: Dim, R2: Dim, - N: Scalar + Clone, + N: Scalar, DefaultAllocator: Allocator + Allocator>, ShapeConstraint: SameNumberOfRows, {} diff --git a/src/base/array_storage.rs b/src/base/array_storage.rs index ccddcce6..bebb8740 100644 --- a/src/base/array_storage.rs +++ b/src/base/array_storage.rs @@ -154,7 +154,7 @@ where unsafe impl Storage for ArrayStorage where - N: Scalar + Clone, + N: Scalar, R: DimName, C: DimName, R::Value: Mul, @@ -206,7 +206,7 @@ where unsafe impl StorageMut for ArrayStorage where - N: Scalar + Clone, + N: Scalar, R: DimName, C: DimName, R::Value: Mul, @@ -226,7 +226,7 @@ where unsafe impl ContiguousStorage for ArrayStorage where - N: Scalar + Clone, + N: Scalar, R: DimName, C: DimName, R::Value: Mul, @@ -236,7 +236,7 @@ where unsafe impl ContiguousStorageMut for ArrayStorage where - N: Scalar + Clone, + N: Scalar, R: DimName, C: DimName, 
R::Value: Mul, @@ -253,7 +253,7 @@ where #[cfg(feature = "serde-serialize")] impl Serialize for ArrayStorage where - N: Scalar + Clone + Serialize, + N: Scalar + Serialize, R: DimName, C: DimName, R::Value: Mul, @@ -274,7 +274,7 @@ where #[cfg(feature = "serde-serialize")] impl<'a, N, R, C> Deserialize<'a> for ArrayStorage where - N: Scalar + Clone + Deserialize<'a>, + N: Scalar + Deserialize<'a>, R: DimName, C: DimName, R::Value: Mul, @@ -295,7 +295,7 @@ struct ArrayStorageVisitor { #[cfg(feature = "serde-serialize")] impl ArrayStorageVisitor where - N: Scalar + Clone, + N: Scalar, R: DimName, C: DimName, R::Value: Mul, @@ -312,7 +312,7 @@ where #[cfg(feature = "serde-serialize")] impl<'a, N, R, C> Visitor<'a> for ArrayStorageVisitor where - N: Scalar + Clone + Deserialize<'a>, + N: Scalar + Deserialize<'a>, R: DimName, C: DimName, R::Value: Mul, diff --git a/src/base/blas.rs b/src/base/blas.rs index 5cdc52dc..d147bbcb 100644 --- a/src/base/blas.rs +++ b/src/base/blas.rs @@ -48,7 +48,7 @@ impl> Vector { } } -impl> Vector { +impl> Vector { /// Computes the index and value of the vector component with the largest value. /// /// # Examples: @@ -230,7 +230,7 @@ impl> Matrix { } -impl> Matrix { +impl> Matrix { /// Computes the index of the matrix component with the largest absolute value. 
/// /// # Examples: @@ -264,7 +264,7 @@ impl> Matrix -where N: Scalar + Clone + Zero + ClosedAdd + ClosedMul +where N: Scalar + Zero + ClosedAdd + ClosedMul { #[inline(always)] fn dotx(&self, rhs: &Matrix, conjugate: impl Fn(N) -> N) -> N @@ -469,7 +469,7 @@ where N: Scalar + Clone + Zero + ClosedAdd + ClosedMul } fn array_axcpy(y: &mut [N], a: N, x: &[N], c: N, beta: N, stride1: usize, stride2: usize, len: usize) -where N: Scalar + Clone + Zero + ClosedAdd + ClosedMul { +where N: Scalar + Zero + ClosedAdd + ClosedMul { for i in 0..len { unsafe { let y = y.get_unchecked_mut(i * stride1); @@ -479,7 +479,7 @@ where N: Scalar + Clone + Zero + ClosedAdd + ClosedMul { } fn array_axc(y: &mut [N], a: N, x: &[N], c: N, stride1: usize, stride2: usize, len: usize) -where N: Scalar + Clone + Zero + ClosedAdd + ClosedMul { +where N: Scalar + Zero + ClosedAdd + ClosedMul { for i in 0..len { unsafe { *y.get_unchecked_mut(i * stride1) = a.inlined_clone() * x.get_unchecked(i * stride2).inlined_clone() * c.inlined_clone(); @@ -489,7 +489,7 @@ where N: Scalar + Clone + Zero + ClosedAdd + ClosedMul { impl Vector where - N: Scalar + Clone + Zero + ClosedAdd + ClosedMul, + N: Scalar + Zero + ClosedAdd + ClosedMul, S: StorageMut, { /// Computes `self = a * x * c + b * self`. @@ -886,7 +886,7 @@ where } impl> Matrix -where N: Scalar + Clone + Zero + ClosedAdd + ClosedMul +where N: Scalar + Zero + ClosedAdd + ClosedMul { #[inline(always)] fn gerx( @@ -1249,7 +1249,7 @@ where N: Scalar + Clone + Zero + ClosedAdd + ClosedMul } impl> Matrix -where N: Scalar + Clone + Zero + ClosedAdd + ClosedMul +where N: Scalar + Zero + ClosedAdd + ClosedMul { #[inline(always)] fn xxgerx( @@ -1396,7 +1396,7 @@ where N: Scalar + Clone + Zero + ClosedAdd + ClosedMul } impl> SquareMatrix -where N: Scalar + Clone + Zero + One + ClosedAdd + ClosedMul +where N: Scalar + Zero + One + ClosedAdd + ClosedMul { /// Computes the quadratic form `self = alpha * lhs * mid * lhs.transpose() + beta * self`. 
/// diff --git a/src/base/cg.rs b/src/base/cg.rs index 262ffecf..df3906ae 100644 --- a/src/base/cg.rs +++ b/src/base/cg.rs @@ -23,7 +23,7 @@ use alga::linear::Transformation; impl MatrixN where - N: Scalar + Clone + Ring, + N: Scalar + Ring, DefaultAllocator: Allocator, { /// Creates a new homogeneous matrix that applies the same scaling factor on each dimension. @@ -153,7 +153,7 @@ impl Matrix4 { } } -impl> SquareMatrix { +impl> SquareMatrix { /// Computes the transformation equal to `self` followed by an uniform scaling factor. #[inline] pub fn append_scaling(&self, scaling: N) -> MatrixN @@ -240,7 +240,7 @@ impl> SquareMatrix> SquareMatrix { +impl> SquareMatrix { /// Computes in-place the transformation equal to `self` followed by an uniform scaling factor. #[inline] pub fn append_scaling_mut(&mut self, scaling: N) diff --git a/src/base/componentwise.rs b/src/base/componentwise.rs index 96f5d12a..c4ce1293 100644 --- a/src/base/componentwise.rs +++ b/src/base/componentwise.rs @@ -14,7 +14,7 @@ use crate::base::{DefaultAllocator, Matrix, MatrixMN, MatrixSum, Scalar}; /// The type of the result of a matrix component-wise operation. pub type MatrixComponentOp = MatrixSum; -impl> Matrix { +impl> Matrix { /// Computes the component-wise absolute value. /// /// # Example @@ -45,7 +45,7 @@ impl> Matrix macro_rules! component_binop_impl( ($($binop: ident, $binop_mut: ident, $binop_assign: ident, $cmpy: ident, $Trait: ident . $op: ident . $op_assign: ident, $desc:expr, $desc_cmpy:expr, $desc_mut:expr);* $(;)*) => {$( - impl> Matrix { + impl> Matrix { #[doc = $desc] #[inline] pub fn $binop(&self, rhs: &Matrix) -> MatrixComponentOp @@ -70,7 +70,7 @@ macro_rules! component_binop_impl( } } - impl> Matrix { + impl> Matrix { // componentwise binop plus Y. 
#[doc = $desc_cmpy] #[inline] diff --git a/src/base/construction.rs b/src/base/construction.rs index 6925351d..f6056576 100644 --- a/src/base/construction.rs +++ b/src/base/construction.rs @@ -27,7 +27,7 @@ use crate::base::{DefaultAllocator, Matrix, MatrixMN, MatrixN, Scalar, Unit, Vec * Generic constructors. * */ -impl MatrixMN +impl MatrixMN where DefaultAllocator: Allocator { /// Creates a new uninitialized matrix. If the matrix has a compile-time dimension, this panics @@ -286,7 +286,7 @@ where DefaultAllocator: Allocator impl MatrixN where - N: Scalar + Clone, + N: Scalar, DefaultAllocator: Allocator, { /// Creates a square matrix with its diagonal set to `diag` and all other entries set to 0. @@ -330,7 +330,7 @@ where */ macro_rules! impl_constructors( ($($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => { - impl MatrixMN + impl MatrixMN where DefaultAllocator: Allocator { /// Creates a new uninitialized matrix or vector. @@ -559,7 +559,7 @@ macro_rules! impl_constructors( } } - impl MatrixMN + impl MatrixMN where DefaultAllocator: Allocator, Standard: Distribution { @@ -603,7 +603,7 @@ impl_constructors!(Dynamic, Dynamic; */ macro_rules! impl_constructors_from_data( ($data: ident; $($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => { - impl MatrixMN + impl MatrixMN where DefaultAllocator: Allocator { /// Creates a matrix with its elements filled with the components provided by a slice /// in row-major order. 
@@ -721,7 +721,7 @@ impl_constructors_from_data!(data; Dynamic, Dynamic; */ impl Zero for MatrixMN where - N: Scalar + Clone + Zero + ClosedAdd, + N: Scalar + Zero + ClosedAdd, DefaultAllocator: Allocator, { #[inline] @@ -737,7 +737,7 @@ where impl One for MatrixN where - N: Scalar + Clone + Zero + One + ClosedMul + ClosedAdd, + N: Scalar + Zero + One + ClosedMul + ClosedAdd, DefaultAllocator: Allocator, { #[inline] @@ -748,7 +748,7 @@ where impl Bounded for MatrixMN where - N: Scalar + Clone + Bounded, + N: Scalar + Bounded, DefaultAllocator: Allocator, { #[inline] @@ -762,7 +762,7 @@ where } } -impl Distribution> for Standard +impl Distribution> for Standard where DefaultAllocator: Allocator, Standard: Distribution, @@ -781,7 +781,7 @@ impl Arbitrary for MatrixMN where R: Dim, C: Dim, - N: Scalar + Clone + Arbitrary + Send, + N: Scalar + Arbitrary + Send, DefaultAllocator: Allocator, Owned: Clone + Send, { @@ -822,7 +822,7 @@ where macro_rules! componentwise_constructors_impl( ($($R: ty, $C: ty, $($args: ident:($irow: expr,$icol: expr)),*);* $(;)*) => {$( impl MatrixMN - where N: Scalar + Clone, + where N: Scalar, DefaultAllocator: Allocator { /// Initializes this matrix from its components. #[inline] @@ -990,7 +990,7 @@ componentwise_constructors_impl!( */ impl VectorN where - N: Scalar + Clone + Zero + One, + N: Scalar + Zero + One, DefaultAllocator: Allocator, { /// The column vector with a 1 as its first component, and zero elsewhere. diff --git a/src/base/construction_slice.rs b/src/base/construction_slice.rs index 0e1d28f8..4f745a65 100644 --- a/src/base/construction_slice.rs +++ b/src/base/construction_slice.rs @@ -8,7 +8,7 @@ use num_rational::Ratio; * Slice constructors. 
* */ -impl<'a, N: Scalar + Clone, R: Dim, C: Dim, RStride: Dim, CStride: Dim> +impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> MatrixSliceMN<'a, N, R, C, RStride, CStride> { /// Creates, without bound-checking, a matrix slice from an array and with dimensions and strides specified by generic types instances. @@ -61,7 +61,7 @@ impl<'a, N: Scalar + Clone, R: Dim, C: Dim, RStride: Dim, CStride: Dim> } } -impl<'a, N: Scalar + Clone, R: Dim, C: Dim, RStride: Dim, CStride: Dim> +impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> MatrixSliceMutMN<'a, N, R, C, RStride, CStride> { /// Creates, without bound-checking, a mutable matrix slice from an array and with dimensions and strides specified by generic types instances. @@ -133,7 +133,7 @@ impl<'a, N: Scalar + Clone, R: Dim, C: Dim, RStride: Dim, CStride: Dim> } } -impl<'a, N: Scalar + Clone, R: Dim, C: Dim> MatrixSliceMN<'a, N, R, C> { +impl<'a, N: Scalar, R: Dim, C: Dim> MatrixSliceMN<'a, N, R, C> { /// Creates, without bound-checking, a matrix slice from an array and with dimensions specified by generic types instances. /// /// This method is unsafe because the input data array is not checked to contain enough elements. @@ -159,7 +159,7 @@ impl<'a, N: Scalar + Clone, R: Dim, C: Dim> MatrixSliceMN<'a, N, R, C> { } } -impl<'a, N: Scalar + Clone, R: Dim, C: Dim> MatrixSliceMutMN<'a, N, R, C> { +impl<'a, N: Scalar, R: Dim, C: Dim> MatrixSliceMutMN<'a, N, R, C> { /// Creates, without bound-checking, a mutable matrix slice from an array and with dimensions specified by generic types instances. /// /// This method is unsafe because the input data array is not checked to contain enough elements. @@ -187,7 +187,7 @@ impl<'a, N: Scalar + Clone, R: Dim, C: Dim> MatrixSliceMutMN<'a, N, R, C> { macro_rules! 
impl_constructors( ($($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => { - impl<'a, N: Scalar + Clone, $($DimIdent: $DimBound),*> MatrixSliceMN<'a, N, $($Dims),*> { + impl<'a, N: Scalar, $($DimIdent: $DimBound),*> MatrixSliceMN<'a, N, $($Dims),*> { /// Creates a new matrix slice from the given data array. /// /// Panics if `data` does not contain enough elements. @@ -203,7 +203,7 @@ macro_rules! impl_constructors( } } - impl<'a, N: Scalar + Clone, $($DimIdent: $DimBound, )*> MatrixSliceMN<'a, N, $($Dims,)* Dynamic, Dynamic> { + impl<'a, N: Scalar, $($DimIdent: $DimBound, )*> MatrixSliceMN<'a, N, $($Dims,)* Dynamic, Dynamic> { /// Creates a new matrix slice with the specified strides from the given data array. /// /// Panics if `data` does not contain enough elements. @@ -244,7 +244,7 @@ impl_constructors!(Dynamic, Dynamic; macro_rules! impl_constructors_mut( ($($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => { - impl<'a, N: Scalar + Clone, $($DimIdent: $DimBound),*> MatrixSliceMutMN<'a, N, $($Dims),*> { + impl<'a, N: Scalar, $($DimIdent: $DimBound),*> MatrixSliceMutMN<'a, N, $($Dims),*> { /// Creates a new mutable matrix slice from the given data array. /// /// Panics if `data` does not contain enough elements. @@ -260,7 +260,7 @@ macro_rules! impl_constructors_mut( } } - impl<'a, N: Scalar + Clone, $($DimIdent: $DimBound, )*> MatrixSliceMutMN<'a, N, $($Dims,)* Dynamic, Dynamic> { + impl<'a, N: Scalar, $($DimIdent: $DimBound, )*> MatrixSliceMutMN<'a, N, $($Dims,)* Dynamic, Dynamic> { /// Creates a new mutable matrix slice with the specified strides from the given data array. /// /// Panics if `data` does not contain enough elements. 
diff --git a/src/base/conversion.rs b/src/base/conversion.rs index 4b937556..7763a086 100644 --- a/src/base/conversion.rs +++ b/src/base/conversion.rs @@ -31,8 +31,8 @@ where C1: Dim, R2: Dim, C2: Dim, - N1: Scalar + Clone, - N2: Scalar + Clone + SupersetOf, + N1: Scalar, + N2: Scalar + SupersetOf, DefaultAllocator: Allocator + Allocator + SameShapeAllocator, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, @@ -75,7 +75,7 @@ where } } -impl<'a, N: Scalar + Clone, R: Dim, C: Dim, S: Storage> IntoIterator for &'a Matrix { +impl<'a, N: Scalar, R: Dim, C: Dim, S: Storage> IntoIterator for &'a Matrix { type Item = &'a N; type IntoIter = MatrixIter<'a, N, R, C, S>; @@ -85,7 +85,7 @@ impl<'a, N: Scalar + Clone, R: Dim, C: Dim, S: Storage> IntoIterator fo } } -impl<'a, N: Scalar + Clone, R: Dim, C: Dim, S: StorageMut> IntoIterator +impl<'a, N: Scalar, R: Dim, C: Dim, S: StorageMut> IntoIterator for &'a mut Matrix { type Item = &'a mut N; @@ -100,7 +100,7 @@ impl<'a, N: Scalar + Clone, R: Dim, C: Dim, S: StorageMut> IntoIterator macro_rules! impl_from_into_asref_1D( ($(($NRows: ident, $NCols: ident) => $SZ: expr);* $(;)*) => {$( impl From<[N; $SZ]> for MatrixMN - where N: Scalar + Clone, + where N: Scalar, DefaultAllocator: Allocator { #[inline] fn from(arr: [N; $SZ]) -> Self { @@ -114,7 +114,7 @@ macro_rules! impl_from_into_asref_1D( } impl Into<[N; $SZ]> for Matrix - where N: Scalar + Clone, + where N: Scalar, S: ContiguousStorage { #[inline] fn into(self) -> [N; $SZ] { @@ -128,7 +128,7 @@ macro_rules! impl_from_into_asref_1D( } impl AsRef<[N; $SZ]> for Matrix - where N: Scalar + Clone, + where N: Scalar, S: ContiguousStorage { #[inline] fn as_ref(&self) -> &[N; $SZ] { @@ -139,7 +139,7 @@ macro_rules! impl_from_into_asref_1D( } impl AsMut<[N; $SZ]> for Matrix - where N: Scalar + Clone, + where N: Scalar, S: ContiguousStorageMut { #[inline] fn as_mut(&mut self) -> &mut [N; $SZ] { @@ -168,7 +168,7 @@ impl_from_into_asref_1D!( macro_rules! 
impl_from_into_asref_2D( ($(($NRows: ty, $NCols: ty) => ($SZRows: expr, $SZCols: expr));* $(;)*) => {$( - impl From<[[N; $SZRows]; $SZCols]> for MatrixMN + impl From<[[N; $SZRows]; $SZCols]> for MatrixMN where DefaultAllocator: Allocator { #[inline] fn from(arr: [[N; $SZRows]; $SZCols]) -> Self { @@ -181,7 +181,7 @@ macro_rules! impl_from_into_asref_2D( } } - impl Into<[[N; $SZRows]; $SZCols]> for Matrix + impl Into<[[N; $SZRows]; $SZCols]> for Matrix where S: ContiguousStorage { #[inline] fn into(self) -> [[N; $SZRows]; $SZCols] { @@ -194,7 +194,7 @@ macro_rules! impl_from_into_asref_2D( } } - impl AsRef<[[N; $SZRows]; $SZCols]> for Matrix + impl AsRef<[[N; $SZRows]; $SZCols]> for Matrix where S: ContiguousStorage { #[inline] fn as_ref(&self) -> &[[N; $SZRows]; $SZCols] { @@ -204,7 +204,7 @@ macro_rules! impl_from_into_asref_2D( } } - impl AsMut<[[N; $SZRows]; $SZCols]> for Matrix + impl AsMut<[[N; $SZRows]; $SZCols]> for Matrix where S: ContiguousStorageMut { #[inline] fn as_mut(&mut self) -> &mut [[N; $SZRows]; $SZCols] { @@ -229,7 +229,7 @@ impl_from_into_asref_2D!( macro_rules! impl_from_into_mint_1D( ($($NRows: ident => $VT:ident [$SZ: expr]);* $(;)*) => {$( impl From> for MatrixMN - where N: Scalar + Clone, + where N: Scalar, DefaultAllocator: Allocator { #[inline] fn from(v: mint::$VT) -> Self { @@ -243,7 +243,7 @@ macro_rules! impl_from_into_mint_1D( } impl Into> for Matrix - where N: Scalar + Clone, + where N: Scalar, S: ContiguousStorage { #[inline] fn into(self) -> mint::$VT { @@ -257,7 +257,7 @@ macro_rules! impl_from_into_mint_1D( } impl AsRef> for Matrix - where N: Scalar + Clone, + where N: Scalar, S: ContiguousStorage { #[inline] fn as_ref(&self) -> &mint::$VT { @@ -268,7 +268,7 @@ macro_rules! impl_from_into_mint_1D( } impl AsMut> for Matrix - where N: Scalar + Clone, + where N: Scalar, S: ContiguousStorageMut { #[inline] fn as_mut(&mut self) -> &mut mint::$VT { @@ -292,7 +292,7 @@ impl_from_into_mint_1D!( macro_rules! 
impl_from_into_mint_2D( ($(($NRows: ty, $NCols: ty) => $MV:ident{ $($component:ident),* }[$SZRows: expr]);* $(;)*) => {$( impl From> for MatrixMN - where N: Scalar + Clone, + where N: Scalar, DefaultAllocator: Allocator { #[inline] fn from(m: mint::$MV) -> Self { @@ -310,7 +310,7 @@ macro_rules! impl_from_into_mint_2D( } impl Into> for MatrixMN - where N: Scalar + Clone, + where N: Scalar, DefaultAllocator: Allocator { #[inline] fn into(self) -> mint::$MV { @@ -342,7 +342,7 @@ impl_from_into_mint_2D!( impl<'a, N, R, C, RStride, CStride> From> for Matrix> where - N: Scalar + Clone, + N: Scalar, R: DimName, C: DimName, RStride: Dim, @@ -359,7 +359,7 @@ where impl<'a, N, C, RStride, CStride> From> for Matrix> where - N: Scalar + Clone, + N: Scalar, C: Dim, RStride: Dim, CStride: Dim, @@ -373,7 +373,7 @@ where impl<'a, N, R, RStride, CStride> From> for Matrix> where - N: Scalar + Clone, + N: Scalar, R: DimName, RStride: Dim, CStride: Dim, @@ -386,7 +386,7 @@ where impl<'a, N, R, C, RStride, CStride> From> for Matrix> where - N: Scalar + Clone, + N: Scalar, R: DimName, C: DimName, RStride: Dim, @@ -403,7 +403,7 @@ where impl<'a, N, C, RStride, CStride> From> for Matrix> where - N: Scalar + Clone, + N: Scalar, C: Dim, RStride: Dim, CStride: Dim, @@ -417,7 +417,7 @@ where impl<'a, N, R, RStride, CStride> From> for Matrix> where - N: Scalar + Clone, + N: Scalar, R: DimName, RStride: Dim, CStride: Dim, @@ -430,7 +430,7 @@ where impl<'a, N, R, C, RSlice, CSlice, RStride, CStride, S> From<&'a Matrix> for MatrixSlice<'a, N, RSlice, CSlice, RStride, CStride> where - N: Scalar + Clone, + N: Scalar, R: Dim, C: Dim, RSlice: Dim, @@ -463,7 +463,7 @@ for MatrixSlice<'a, N, RSlice, CSlice, RStride, CStride> impl<'a, N, R, C, RSlice, CSlice, RStride, CStride, S> From<&'a mut Matrix> for MatrixSlice<'a, N, RSlice, CSlice, RStride, CStride> where - N: Scalar + Clone, + N: Scalar, R: Dim, C: Dim, RSlice: Dim, @@ -496,7 +496,7 @@ for MatrixSlice<'a, N, RSlice, CSlice, RStride, CStride> 
impl<'a, N, R, C, RSlice, CSlice, RStride, CStride, S> From<&'a mut Matrix> for MatrixSliceMut<'a, N, RSlice, CSlice, RStride, CStride> where - N: Scalar + Clone, + N: Scalar, R: Dim, C: Dim, RSlice: Dim, diff --git a/src/base/coordinates.rs b/src/base/coordinates.rs index ee6ccf91..832723e3 100644 --- a/src/base/coordinates.rs +++ b/src/base/coordinates.rs @@ -24,7 +24,7 @@ macro_rules! coords_impl( #[repr(C)] #[derive(Eq, PartialEq, Clone, Hash, Debug, Copy)] #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] - pub struct $T { + pub struct $T { $(pub $comps: N),* } } @@ -32,7 +32,7 @@ macro_rules! coords_impl( macro_rules! deref_impl( ($R: ty, $C: ty; $Target: ident) => { - impl Deref for Matrix + impl Deref for Matrix where S: ContiguousStorage { type Target = $Target; @@ -42,7 +42,7 @@ macro_rules! deref_impl( } } - impl DerefMut for Matrix + impl DerefMut for Matrix where S: ContiguousStorageMut { #[inline] fn deref_mut(&mut self) -> &mut Self::Target { diff --git a/src/base/default_allocator.rs b/src/base/default_allocator.rs index 4413de27..c07c8708 100644 --- a/src/base/default_allocator.rs +++ b/src/base/default_allocator.rs @@ -36,7 +36,7 @@ pub struct DefaultAllocator; // Static - Static impl Allocator for DefaultAllocator where - N: Scalar + Clone, + N: Scalar, R: DimName, C: DimName, R::Value: Mul, @@ -76,7 +76,7 @@ where // Dynamic - Static // Dynamic - Dynamic #[cfg(any(feature = "std", feature = "alloc"))] -impl Allocator for DefaultAllocator { +impl Allocator for DefaultAllocator { type Buffer = VecStorage; #[inline] @@ -107,7 +107,7 @@ impl Allocator for DefaultAllocator { // Static - Dynamic #[cfg(any(feature = "std", feature = "alloc"))] -impl Allocator for DefaultAllocator { +impl Allocator for DefaultAllocator { type Buffer = VecStorage; #[inline] @@ -142,7 +142,7 @@ impl Allocator for DefaultAllocato * */ // Anything -> Static × Static -impl Reallocator for DefaultAllocator +impl Reallocator for DefaultAllocator where 
RFrom: Dim, CFrom: Dim, @@ -173,7 +173,7 @@ where // Static × Static -> Dynamic × Any #[cfg(any(feature = "std", feature = "alloc"))] -impl Reallocator for DefaultAllocator +impl Reallocator for DefaultAllocator where RFrom: DimName, CFrom: DimName, @@ -202,7 +202,7 @@ where // Static × Static -> Static × Dynamic #[cfg(any(feature = "std", feature = "alloc"))] -impl Reallocator for DefaultAllocator +impl Reallocator for DefaultAllocator where RFrom: DimName, CFrom: DimName, @@ -231,7 +231,7 @@ where // All conversion from a dynamic buffer to a dynamic buffer. #[cfg(any(feature = "std", feature = "alloc"))] -impl Reallocator +impl Reallocator for DefaultAllocator { #[inline] @@ -247,7 +247,7 @@ impl Reallocator Reallocator +impl Reallocator for DefaultAllocator { #[inline] @@ -263,7 +263,7 @@ impl Reallocator Reallocator +impl Reallocator for DefaultAllocator { #[inline] @@ -279,7 +279,7 @@ impl Reallocator Reallocator +impl Reallocator for DefaultAllocator { #[inline] diff --git a/src/base/edition.rs b/src/base/edition.rs index 19937e3f..b84a6110 100644 --- a/src/base/edition.rs +++ b/src/base/edition.rs @@ -18,7 +18,7 @@ use crate::base::storage::{Storage, StorageMut}; use crate::base::DMatrix; use crate::base::{DefaultAllocator, Matrix, MatrixMN, RowVector, Scalar, Vector}; -impl> Matrix { +impl> Matrix { /// Extracts the upper triangular part of this matrix (including the diagonal). #[inline] pub fn upper_triangle(&self) -> MatrixMN @@ -92,7 +92,7 @@ impl> Matrix> Matrix { +impl> Matrix { /// Sets all the elements of this matrix to `val`. #[inline] pub fn fill(&mut self, val: N) { @@ -253,7 +253,7 @@ impl> Matrix> Matrix { +impl> Matrix { /// Copies the upper-triangle of this matrix to its lower-triangular part. /// /// This makes the matrix symmetric. Panics if the matrix is not square. @@ -291,7 +291,7 @@ impl> Matrix { * FIXME: specialize all the following for slices. * */ -impl> Matrix { +impl> Matrix { /* * * Column removal. 
@@ -797,7 +797,7 @@ impl> Matrix } #[cfg(any(feature = "std", feature = "alloc"))] -impl DMatrix { +impl DMatrix { /// Resizes this matrix in-place. /// /// The values are copied such that `self[(i, j)] == result[(i, j)]`. If the result has more @@ -814,7 +814,7 @@ impl DMatrix { } #[cfg(any(feature = "std", feature = "alloc"))] -impl MatrixMN +impl MatrixMN where DefaultAllocator: Allocator { /// Changes the number of rows of this matrix in-place. @@ -835,7 +835,7 @@ where DefaultAllocator: Allocator } #[cfg(any(feature = "std", feature = "alloc"))] -impl MatrixMN +impl MatrixMN where DefaultAllocator: Allocator { /// Changes the number of column of this matrix in-place. @@ -855,7 +855,7 @@ where DefaultAllocator: Allocator } } -unsafe fn compress_rows( +unsafe fn compress_rows( data: &mut [N], nrows: usize, ncols: usize, @@ -895,7 +895,7 @@ unsafe fn compress_rows( // Moves entries of a matrix buffer to make place for `ninsert` emty rows starting at the `i-th` row index. // The `data` buffer is assumed to contained at least `(nrows + ninsert) * ncols` elements. -unsafe fn extend_rows( +unsafe fn extend_rows( data: &mut [N], nrows: usize, ncols: usize, @@ -938,7 +938,7 @@ unsafe fn extend_rows( #[cfg(any(feature = "std", feature = "alloc"))] impl Extend for Matrix where - N: Scalar + Clone, + N: Scalar, R: Dim, S: Extend, { @@ -986,7 +986,7 @@ where #[cfg(any(feature = "std", feature = "alloc"))] impl Extend for Matrix where - N: Scalar + Clone, + N: Scalar, S: Extend, { /// Extend the number of rows of a `Vector` with elements @@ -1007,7 +1007,7 @@ where #[cfg(any(feature = "std", feature = "alloc"))] impl Extend> for Matrix where - N: Scalar + Clone, + N: Scalar, R: Dim, S: Extend>, RV: Dim, diff --git a/src/base/indexing.rs b/src/base/indexing.rs index 35b4a9ff..ca786530 100644 --- a/src/base/indexing.rs +++ b/src/base/indexing.rs @@ -267,7 +267,7 @@ fn dimrange_rangetoinclusive_usize() { } /// A helper trait used for indexing operations. 
-pub trait MatrixIndex<'a, N: Scalar + Clone, R: Dim, C: Dim, S: Storage>: Sized { +pub trait MatrixIndex<'a, N: Scalar, R: Dim, C: Dim, S: Storage>: Sized { /// The output type returned by methods. type Output : 'a; @@ -303,7 +303,7 @@ pub trait MatrixIndex<'a, N: Scalar + Clone, R: Dim, C: Dim, S: Storage } /// A helper trait used for indexing operations. -pub trait MatrixIndexMut<'a, N: Scalar + Clone, R: Dim, C: Dim, S: StorageMut>: MatrixIndex<'a, N, R, C, S> { +pub trait MatrixIndexMut<'a, N: Scalar, R: Dim, C: Dim, S: StorageMut>: MatrixIndex<'a, N, R, C, S> { /// The output type returned by methods. type OutputMut : 'a; @@ -432,7 +432,7 @@ pub trait MatrixIndexMut<'a, N: Scalar + Clone, R: Dim, C: Dim, S: StorageMut> Matrix +impl> Matrix { /// Produces a view of the data at the given index, or /// `None` if the index is out of bounds. @@ -502,7 +502,7 @@ impl> Matrix impl<'a, N, R, C, S> MatrixIndex<'a, N, R, C, S> for usize where - N: Scalar + Clone, + N: Scalar, R: Dim, C: Dim, S: Storage @@ -524,7 +524,7 @@ where impl<'a, N, R, C, S> MatrixIndexMut<'a, N, R, C, S> for usize where - N: Scalar + Clone, + N: Scalar, R: Dim, C: Dim, S: StorageMut @@ -544,7 +544,7 @@ where impl<'a, N, R, C, S> MatrixIndex<'a, N, R, C, S> for (usize, usize) where - N: Scalar + Clone, + N: Scalar, R: Dim, C: Dim, S: Storage @@ -569,7 +569,7 @@ where impl<'a, N, R, C, S> MatrixIndexMut<'a, N, R, C, S> for (usize, usize) where - N: Scalar + Clone, + N: Scalar, R: Dim, C: Dim, S: StorageMut @@ -607,7 +607,7 @@ macro_rules! impl_index_pair { { impl<'a, N, $R, $C, S, $($RTyP : $RTyPB,)* $($CTyP : $CTyPB),*> MatrixIndex<'a, N, $R, $C, S> for ($RIdx, $CIdx) where - N: Scalar + Clone, + N: Scalar, $R: Dim, $C: Dim, S: Storage, @@ -643,7 +643,7 @@ macro_rules! 
impl_index_pair { impl<'a, N, $R, $C, S, $($RTyP : $RTyPB,)* $($CTyP : $CTyPB),*> MatrixIndexMut<'a, N, $R, $C, S> for ($RIdx, $CIdx) where - N: Scalar + Clone, + N: Scalar, $R: Dim, $C: Dim, S: StorageMut, diff --git a/src/base/iter.rs b/src/base/iter.rs index 1032cd2a..65cdb20a 100644 --- a/src/base/iter.rs +++ b/src/base/iter.rs @@ -10,7 +10,7 @@ use crate::base::{Scalar, Matrix, MatrixSlice, MatrixSliceMut}; macro_rules! iterator { (struct $Name:ident for $Storage:ident.$ptr: ident -> $Ptr:ty, $Ref:ty, $SRef: ty) => { /// An iterator through a dense matrix with arbitrary strides matrix. - pub struct $Name<'a, N: Scalar + Clone, R: Dim, C: Dim, S: 'a + $Storage> { + pub struct $Name<'a, N: Scalar, R: Dim, C: Dim, S: 'a + $Storage> { ptr: $Ptr, inner_ptr: $Ptr, inner_end: $Ptr, @@ -21,7 +21,7 @@ macro_rules! iterator { // FIXME: we need to specialize for the case where the matrix storage is owned (in which // case the iterator is trivial because it does not have any stride). - impl<'a, N: Scalar + Clone, R: Dim, C: Dim, S: 'a + $Storage> $Name<'a, N, R, C, S> { + impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + $Storage> $Name<'a, N, R, C, S> { /// Creates a new iterator for the given matrix storage. pub fn new(storage: $SRef) -> $Name<'a, N, R, C, S> { let shape = storage.shape(); @@ -58,7 +58,7 @@ macro_rules! iterator { } } - impl<'a, N: Scalar + Clone, R: Dim, C: Dim, S: 'a + $Storage> Iterator + impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + $Storage> Iterator for $Name<'a, N, R, C, S> { type Item = $Ref; @@ -111,7 +111,7 @@ macro_rules! iterator { } } - impl<'a, N: Scalar + Clone, R: Dim, C: Dim, S: 'a + $Storage> ExactSizeIterator + impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + $Storage> ExactSizeIterator for $Name<'a, N, R, C, S> { #[inline] @@ -133,12 +133,12 @@ iterator!(struct MatrixIterMut for StorageMut.ptr_mut -> *mut N, &'a mut N, &'a */ #[derive(Clone)] /// An iterator through the rows of a matrix. 
-pub struct RowIter<'a, N: Scalar + Clone, R: Dim, C: Dim, S: Storage> { +pub struct RowIter<'a, N: Scalar, R: Dim, C: Dim, S: Storage> { mat: &'a Matrix, curr: usize } -impl<'a, N: Scalar + Clone, R: Dim, C: Dim, S: 'a + Storage> RowIter<'a, N, R, C, S> { +impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + Storage> RowIter<'a, N, R, C, S> { pub(crate) fn new(mat: &'a Matrix) -> Self { RowIter { mat, curr: 0 @@ -147,7 +147,7 @@ impl<'a, N: Scalar + Clone, R: Dim, C: Dim, S: 'a + Storage> RowIter<'a } -impl<'a, N: Scalar + Clone, R: Dim, C: Dim, S: 'a + Storage> Iterator for RowIter<'a, N, R, C, S> { +impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + Storage> Iterator for RowIter<'a, N, R, C, S> { type Item = MatrixSlice<'a, N, U1, C, S::RStride, S::CStride>; #[inline] @@ -172,7 +172,7 @@ impl<'a, N: Scalar + Clone, R: Dim, C: Dim, S: 'a + Storage> Iterator f } } -impl<'a, N: Scalar + Clone, R: Dim, C: Dim, S: 'a + Storage> ExactSizeIterator for RowIter<'a, N, R, C, S> { +impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + Storage> ExactSizeIterator for RowIter<'a, N, R, C, S> { #[inline] fn len(&self) -> usize { self.mat.nrows() - self.curr @@ -181,13 +181,13 @@ impl<'a, N: Scalar + Clone, R: Dim, C: Dim, S: 'a + Storage> ExactSizeI /// An iterator through the mutable rows of a matrix. 
-pub struct RowIterMut<'a, N: Scalar + Clone, R: Dim, C: Dim, S: StorageMut> { +pub struct RowIterMut<'a, N: Scalar, R: Dim, C: Dim, S: StorageMut> { mat: *mut Matrix, curr: usize, phantom: PhantomData<&'a mut Matrix> } -impl<'a, N: Scalar + Clone, R: Dim, C: Dim, S: 'a + StorageMut> RowIterMut<'a, N, R, C, S> { +impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + StorageMut> RowIterMut<'a, N, R, C, S> { pub(crate) fn new(mat: &'a mut Matrix) -> Self { RowIterMut { mat, @@ -204,7 +204,7 @@ impl<'a, N: Scalar + Clone, R: Dim, C: Dim, S: 'a + StorageMut> RowIter } -impl<'a, N: Scalar + Clone, R: Dim, C: Dim, S: 'a + StorageMut> Iterator for RowIterMut<'a, N, R, C, S> { +impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + StorageMut> Iterator for RowIterMut<'a, N, R, C, S> { type Item = MatrixSliceMut<'a, N, U1, C, S::RStride, S::CStride>; #[inline] @@ -229,7 +229,7 @@ impl<'a, N: Scalar + Clone, R: Dim, C: Dim, S: 'a + StorageMut> Iterato } } -impl<'a, N: Scalar + Clone, R: Dim, C: Dim, S: 'a + StorageMut> ExactSizeIterator for RowIterMut<'a, N, R, C, S> { +impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + StorageMut> ExactSizeIterator for RowIterMut<'a, N, R, C, S> { #[inline] fn len(&self) -> usize { self.nrows() - self.curr @@ -244,12 +244,12 @@ impl<'a, N: Scalar + Clone, R: Dim, C: Dim, S: 'a + StorageMut> ExactSi */ #[derive(Clone)] /// An iterator through the columns of a matrix. 
-pub struct ColumnIter<'a, N: Scalar + Clone, R: Dim, C: Dim, S: Storage> { +pub struct ColumnIter<'a, N: Scalar, R: Dim, C: Dim, S: Storage> { mat: &'a Matrix, curr: usize } -impl<'a, N: Scalar + Clone, R: Dim, C: Dim, S: 'a + Storage> ColumnIter<'a, N, R, C, S> { +impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + Storage> ColumnIter<'a, N, R, C, S> { pub(crate) fn new(mat: &'a Matrix) -> Self { ColumnIter { mat, curr: 0 @@ -258,7 +258,7 @@ impl<'a, N: Scalar + Clone, R: Dim, C: Dim, S: 'a + Storage> ColumnIter } -impl<'a, N: Scalar + Clone, R: Dim, C: Dim, S: 'a + Storage> Iterator for ColumnIter<'a, N, R, C, S> { +impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + Storage> Iterator for ColumnIter<'a, N, R, C, S> { type Item = MatrixSlice<'a, N, R, U1, S::RStride, S::CStride>; #[inline] @@ -283,7 +283,7 @@ impl<'a, N: Scalar + Clone, R: Dim, C: Dim, S: 'a + Storage> Iterator f } } -impl<'a, N: Scalar + Clone, R: Dim, C: Dim, S: 'a + Storage> ExactSizeIterator for ColumnIter<'a, N, R, C, S> { +impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + Storage> ExactSizeIterator for ColumnIter<'a, N, R, C, S> { #[inline] fn len(&self) -> usize { self.mat.ncols() - self.curr @@ -292,13 +292,13 @@ impl<'a, N: Scalar + Clone, R: Dim, C: Dim, S: 'a + Storage> ExactSizeI /// An iterator through the mutable columns of a matrix. 
-pub struct ColumnIterMut<'a, N: Scalar + Clone, R: Dim, C: Dim, S: StorageMut> { +pub struct ColumnIterMut<'a, N: Scalar, R: Dim, C: Dim, S: StorageMut> { mat: *mut Matrix, curr: usize, phantom: PhantomData<&'a mut Matrix> } -impl<'a, N: Scalar + Clone, R: Dim, C: Dim, S: 'a + StorageMut> ColumnIterMut<'a, N, R, C, S> { +impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + StorageMut> ColumnIterMut<'a, N, R, C, S> { pub(crate) fn new(mat: &'a mut Matrix) -> Self { ColumnIterMut { mat, @@ -315,7 +315,7 @@ impl<'a, N: Scalar + Clone, R: Dim, C: Dim, S: 'a + StorageMut> ColumnI } -impl<'a, N: Scalar + Clone, R: Dim, C: Dim, S: 'a + StorageMut> Iterator for ColumnIterMut<'a, N, R, C, S> { +impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + StorageMut> Iterator for ColumnIterMut<'a, N, R, C, S> { type Item = MatrixSliceMut<'a, N, R, U1, S::RStride, S::CStride>; #[inline] @@ -340,7 +340,7 @@ impl<'a, N: Scalar + Clone, R: Dim, C: Dim, S: 'a + StorageMut> Iterato } } -impl<'a, N: Scalar + Clone, R: Dim, C: Dim, S: 'a + StorageMut> ExactSizeIterator for ColumnIterMut<'a, N, R, C, S> { +impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + StorageMut> ExactSizeIterator for ColumnIterMut<'a, N, R, C, S> { #[inline] fn len(&self) -> usize { self.ncols() - self.curr diff --git a/src/base/matrix.rs b/src/base/matrix.rs index 7682f955..f3b9c044 100644 --- a/src/base/matrix.rs +++ b/src/base/matrix.rs @@ -73,7 +73,7 @@ pub type MatrixCross = /// some concrete types for `N` and a compatible data storage type `S`). #[repr(C)] #[derive(Clone, Copy)] -pub struct Matrix { +pub struct Matrix { /// The data storage that contains all the matrix components and informations about its number /// of rows and column (if needed). 
pub data: S, @@ -81,7 +81,7 @@ pub struct Matrix { _phantoms: PhantomData<(N, R, C)>, } -impl fmt::Debug for Matrix { +impl fmt::Debug for Matrix { fn fmt(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::Error> { formatter .debug_struct("Matrix") @@ -93,7 +93,7 @@ impl fmt::Debug for Matrix Serialize for Matrix where - N: Scalar + Clone, + N: Scalar, R: Dim, C: Dim, S: Serialize, @@ -107,7 +107,7 @@ where #[cfg(feature = "serde-serialize")] impl<'de, N, R, C, S> Deserialize<'de> for Matrix where - N: Scalar + Clone, + N: Scalar, R: Dim, C: Dim, S: Deserialize<'de>, @@ -122,7 +122,7 @@ where } #[cfg(feature = "abomonation-serialize")] -impl Abomonation for Matrix { +impl Abomonation for Matrix { unsafe fn entomb(&self, writer: &mut W) -> IOResult<()> { self.data.entomb(writer) } @@ -136,7 +136,7 @@ impl Abomonation for Matrix Matrix { +impl Matrix { /// Creates a new matrix with the given data without statically checking that the matrix /// dimension matches the storage dimension. #[inline] @@ -148,7 +148,7 @@ impl Matrix { } } -impl> Matrix { +impl> Matrix { /// Creates a new matrix with the given data. #[inline] pub fn from_data(data: S) -> Self { @@ -413,7 +413,7 @@ impl> Matrix /// Returns a matrix containing the result of `f` applied to each of its entries. #[inline] - pub fn map N2>(&self, mut f: F) -> MatrixMN + pub fn map N2>(&self, mut f: F) -> MatrixMN where DefaultAllocator: Allocator { let (nrows, ncols) = self.data.shape(); @@ -434,7 +434,7 @@ impl> Matrix /// Returns a matrix containing the result of `f` applied to each of its entries. Unlike `map`, /// `f` also gets passed the row and column index, i.e. `f(row, col, value)`. 
#[inline] - pub fn map_with_location N2>( + pub fn map_with_location N2>( &self, mut f: F, ) -> MatrixMN @@ -462,8 +462,8 @@ impl> Matrix #[inline] pub fn zip_map(&self, rhs: &Matrix, mut f: F) -> MatrixMN where - N2: Scalar + Clone, - N3: Scalar + Clone, + N2: Scalar, + N3: Scalar, S2: Storage, F: FnMut(N, N2) -> N3, DefaultAllocator: Allocator, @@ -500,9 +500,9 @@ impl> Matrix mut f: F, ) -> MatrixMN where - N2: Scalar + Clone, - N3: Scalar + Clone, - N4: Scalar + Clone, + N2: Scalar, + N3: Scalar, + N4: Scalar, S2: Storage, S3: Storage, F: FnMut(N, N2, N3) -> N4, @@ -555,7 +555,7 @@ impl> Matrix #[inline] pub fn zip_fold(&self, rhs: &Matrix, init: Acc, mut f: impl FnMut(Acc, N, N2) -> Acc) -> Acc where - N2: Scalar + Clone, + N2: Scalar, R2: Dim, C2: Dim, S2: Storage, @@ -623,7 +623,7 @@ impl> Matrix } } -impl> Matrix { +impl> Matrix { /// Mutably iterates through this matrix coordinates. #[inline] pub fn iter_mut(&mut self) -> MatrixIterMut { @@ -797,7 +797,7 @@ impl> Matrix(&mut self, rhs: &Matrix, mut f: impl FnMut(N, N2) -> N) - where N2: Scalar + Clone, + where N2: Scalar, R2: Dim, C2: Dim, S2: Storage, @@ -825,11 +825,11 @@ impl> Matrix(&mut self, b: &Matrix, c: &Matrix, mut f: impl FnMut(N, N2, N3) -> N) - where N2: Scalar + Clone, + where N2: Scalar, R2: Dim, C2: Dim, S2: Storage, - N3: Scalar + Clone, + N3: Scalar, R3: Dim, C3: Dim, S3: Storage, @@ -859,7 +859,7 @@ impl> Matrix> Vector { +impl> Vector { /// Gets a reference to the i-th element of this column vector without bound checking. #[inline] pub unsafe fn vget_unchecked(&self, i: usize) -> &N { @@ -869,7 +869,7 @@ impl> Vector { } } -impl> Vector { +impl> Vector { /// Gets a mutable reference to the i-th element of this column vector without bound checking. #[inline] pub unsafe fn vget_unchecked_mut(&mut self, i: usize) -> &mut N { @@ -879,7 +879,7 @@ impl> Vector { } } -impl> Matrix { +impl> Matrix { /// Extracts a slice containing the entire matrix entries ordered column-by-columns. 
#[inline] pub fn as_slice(&self) -> &[N] { @@ -887,7 +887,7 @@ impl> Matrix> Matrix { +impl> Matrix { /// Extracts a mutable slice containing the entire matrix entries ordered column-by-columns. #[inline] pub fn as_mut_slice(&mut self) -> &mut [N] { @@ -895,7 +895,7 @@ impl> Matrix } } -impl> Matrix { +impl> Matrix { /// Transposes the square matrix `self` in-place. pub fn transpose_mut(&mut self) { assert!( @@ -1052,7 +1052,7 @@ impl> Matrix { } } -impl> SquareMatrix { +impl> SquareMatrix { /// The diagonal of this matrix. #[inline] pub fn diagonal(&self) -> VectorN @@ -1064,7 +1064,7 @@ impl> SquareMatrix { /// /// This is a more efficient version of `self.diagonal().map(f)` since this /// allocates only once. - pub fn map_diagonal(&self, mut f: impl FnMut(N) -> N2) -> VectorN + pub fn map_diagonal(&self, mut f: impl FnMut(N) -> N2) -> VectorN where DefaultAllocator: Allocator { assert!( self.is_square(), @@ -1128,7 +1128,7 @@ impl> SquareMatrix { } } -impl + IsNotStaticOne, S: Storage> Matrix { +impl + IsNotStaticOne, S: Storage> Matrix { /// Yields the homogeneous matrix for this matrix, i.e., appending an additional dimension and /// and setting the diagonal element to `1`. @@ -1144,7 +1144,7 @@ impl + IsNotStaticOne, S: Storage< } -impl, S: Storage> Vector { +impl, S: Storage> Vector { /// Computes the coordinates in projective space of this vector, i.e., appends a `0` to its /// coordinates. #[inline] @@ -1170,7 +1170,7 @@ impl, S: Storage> Vector } } -impl, S: Storage> Vector { +impl, S: Storage> Vector { /// Constructs a new vector of higher dimension by appending `element` to the end of `self`. 
#[inline] pub fn push(&self, element: N) -> VectorN> @@ -1188,7 +1188,7 @@ impl, S: Storage> Vector impl AbsDiffEq for Matrix where - N: Scalar + Clone + AbsDiffEq, + N: Scalar + AbsDiffEq, S: Storage, N::Epsilon: Copy, { @@ -1209,7 +1209,7 @@ where impl RelativeEq for Matrix where - N: Scalar + Clone + RelativeEq, + N: Scalar + RelativeEq, S: Storage, N::Epsilon: Copy, { @@ -1232,7 +1232,7 @@ where impl UlpsEq for Matrix where - N: Scalar + Clone + UlpsEq, + N: Scalar + UlpsEq, S: Storage, N::Epsilon: Copy, { @@ -1252,7 +1252,7 @@ where impl PartialOrd for Matrix where - N: Scalar + Clone + PartialOrd, + N: Scalar + PartialOrd, S: Storage, { #[inline] @@ -1340,13 +1340,13 @@ where impl Eq for Matrix where - N: Scalar + Clone + Eq, + N: Scalar + Eq, S: Storage, {} impl PartialEq for Matrix where - N: Scalar + Clone, + N: Scalar, S: Storage, { #[inline] @@ -1363,13 +1363,13 @@ macro_rules! impl_fmt { ($trait: path, $fmt_str_without_precision: expr, $fmt_str_with_precision: expr) => { impl $trait for Matrix where - N: Scalar + Clone + $trait, + N: Scalar + $trait, S: Storage, DefaultAllocator: Allocator, { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { #[cfg(feature = "std")] - fn val_width(val: &N, f: &mut fmt::Formatter) -> usize { + fn val_width(val: &N, f: &mut fmt::Formatter) -> usize { match f.precision() { Some(precision) => format!($fmt_str_with_precision, val, precision).chars().count(), None => format!($fmt_str_without_precision, val).chars().count(), @@ -1377,7 +1377,7 @@ macro_rules! impl_fmt { } #[cfg(not(feature = "std"))] - fn val_width(_: &N, _: &mut fmt::Formatter) -> usize { + fn val_width(_: &N, _: &mut fmt::Formatter) -> usize { 4 } @@ -1454,7 +1454,7 @@ fn lower_exp() { ") } -impl> Matrix { +impl> Matrix { /// The perpendicular product between two 2D column vectors, i.e. `a.x * b.y - a.y * b.x`. 
#[inline] pub fn perp(&self, b: &Matrix) -> N @@ -1545,7 +1545,7 @@ impl> Matrix> Vector +impl> Vector where DefaultAllocator: Allocator { /// Computes the matrix `M` such that for all vector `v` we have `M * v == self.cross(&v)`. @@ -1593,7 +1593,7 @@ impl> Matrix { } } -impl> +impl> Vector { /// Returns `self * (1.0 - t) + rhs * t`, i.e., the linear blend of the vectors x and y using the scalar value a. @@ -1683,7 +1683,7 @@ impl> Unit> { impl AbsDiffEq for Unit> where - N: Scalar + Clone + AbsDiffEq, + N: Scalar + AbsDiffEq, S: Storage, N::Epsilon: Copy, { @@ -1702,7 +1702,7 @@ where impl RelativeEq for Unit> where - N: Scalar + Clone + RelativeEq, + N: Scalar + RelativeEq, S: Storage, N::Epsilon: Copy, { @@ -1726,7 +1726,7 @@ where impl UlpsEq for Unit> where - N: Scalar + Clone + UlpsEq, + N: Scalar + UlpsEq, S: Storage, N::Epsilon: Copy, { @@ -1743,7 +1743,7 @@ where impl Hash for Matrix where - N: Scalar + Clone + Hash, + N: Scalar + Hash, R: Dim, C: Dim, S: Storage, diff --git a/src/base/matrix_alga.rs b/src/base/matrix_alga.rs index b275ce2e..ac6aced7 100644 --- a/src/base/matrix_alga.rs +++ b/src/base/matrix_alga.rs @@ -25,7 +25,7 @@ use crate::base::{DefaultAllocator, MatrixMN, MatrixN, Scalar}; */ impl Identity for MatrixMN where - N: Scalar + Clone + Zero, + N: Scalar + Zero, DefaultAllocator: Allocator, { #[inline] @@ -36,7 +36,7 @@ where impl AbstractMagma for MatrixMN where - N: Scalar + Clone + ClosedAdd, + N: Scalar + ClosedAdd, DefaultAllocator: Allocator, { #[inline] @@ -47,7 +47,7 @@ where impl TwoSidedInverse for MatrixMN where - N: Scalar + Clone + ClosedNeg, + N: Scalar + ClosedNeg, DefaultAllocator: Allocator, { #[inline] @@ -64,7 +64,7 @@ where macro_rules! 
inherit_additive_structure( ($($marker: ident<$operator: ident> $(+ $bounds: ident)*),* $(,)*) => {$( impl $marker<$operator> for MatrixMN - where N: Scalar + Clone + $marker<$operator> $(+ $bounds)*, + where N: Scalar + $marker<$operator> $(+ $bounds)*, DefaultAllocator: Allocator { } )*} ); @@ -80,7 +80,7 @@ inherit_additive_structure!( impl AbstractModule for MatrixMN where - N: Scalar + Clone + RingCommutative, + N: Scalar + RingCommutative, DefaultAllocator: Allocator, { type AbstractRing = N; @@ -93,7 +93,7 @@ where impl Module for MatrixMN where - N: Scalar + Clone + RingCommutative, + N: Scalar + RingCommutative, DefaultAllocator: Allocator, { type Ring = N; @@ -101,7 +101,7 @@ where impl VectorSpace for MatrixMN where - N: Scalar + Clone + Field, + N: Scalar + Field, DefaultAllocator: Allocator, { type Field = N; @@ -109,7 +109,7 @@ where impl FiniteDimVectorSpace for MatrixMN where - N: Scalar + Clone + Field, + N: Scalar + Field, DefaultAllocator: Allocator, { #[inline] @@ -329,7 +329,7 @@ where DefaultAllocator: Allocator */ impl Identity for MatrixN where - N: Scalar + Clone + Zero + One, + N: Scalar + Zero + One, DefaultAllocator: Allocator, { #[inline] @@ -340,7 +340,7 @@ where impl AbstractMagma for MatrixN where - N: Scalar + Clone + Zero + One + ClosedAdd + ClosedMul, + N: Scalar + Zero + One + ClosedAdd + ClosedMul, DefaultAllocator: Allocator, { #[inline] @@ -352,7 +352,7 @@ where macro_rules! 
impl_multiplicative_structure( ($($marker: ident<$operator: ident> $(+ $bounds: ident)*),* $(,)*) => {$( impl $marker<$operator> for MatrixN - where N: Scalar + Clone + Zero + One + ClosedAdd + ClosedMul + $marker<$operator> $(+ $bounds)*, + where N: Scalar + Zero + One + ClosedAdd + ClosedMul + $marker<$operator> $(+ $bounds)*, DefaultAllocator: Allocator { } )*} ); @@ -369,7 +369,7 @@ impl_multiplicative_structure!( */ impl MeetSemilattice for MatrixMN where - N: Scalar + Clone + MeetSemilattice, + N: Scalar + MeetSemilattice, DefaultAllocator: Allocator, { #[inline] @@ -380,7 +380,7 @@ where impl JoinSemilattice for MatrixMN where - N: Scalar + Clone + JoinSemilattice, + N: Scalar + JoinSemilattice, DefaultAllocator: Allocator, { #[inline] @@ -391,7 +391,7 @@ where impl Lattice for MatrixMN where - N: Scalar + Clone + Lattice, + N: Scalar + Lattice, DefaultAllocator: Allocator, { #[inline] diff --git a/src/base/matrix_slice.rs b/src/base/matrix_slice.rs index 1ab3d487..be53034a 100644 --- a/src/base/matrix_slice.rs +++ b/src/base/matrix_slice.rs @@ -13,22 +13,22 @@ macro_rules! 
slice_storage_impl( ($doc: expr; $Storage: ident as $SRef: ty; $T: ident.$get_addr: ident ($Ptr: ty as $Ref: ty)) => { #[doc = $doc] #[derive(Debug)] - pub struct $T<'a, N: Scalar + Clone, R: Dim, C: Dim, RStride: Dim, CStride: Dim> { + pub struct $T<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> { ptr: $Ptr, shape: (R, C), strides: (RStride, CStride), _phantoms: PhantomData<$Ref>, } - unsafe impl<'a, N: Scalar + Clone + Send, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Send + unsafe impl<'a, N: Scalar + Send, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Send for $T<'a, N, R, C, RStride, CStride> {} - unsafe impl<'a, N: Scalar + Clone + Sync, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Sync + unsafe impl<'a, N: Scalar + Sync, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Sync for $T<'a, N, R, C, RStride, CStride> {} - impl<'a, N: Scalar + Clone, R: Dim, C: Dim, RStride: Dim, CStride: Dim> $T<'a, N, R, C, RStride, CStride> { + impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> $T<'a, N, R, C, RStride, CStride> { /// Create a new matrix slice without bound checking and from a raw pointer. #[inline] pub unsafe fn from_raw_parts(ptr: $Ptr, @@ -48,7 +48,7 @@ macro_rules! slice_storage_impl( } // Dynamic is arbitrary. It's just to be able to call the constructors with `Slice::` - impl<'a, N: Scalar + Clone, R: Dim, C: Dim> $T<'a, N, R, C, Dynamic, Dynamic> { + impl<'a, N: Scalar, R: Dim, C: Dim> $T<'a, N, R, C, Dynamic, Dynamic> { /// Create a new matrix slice without bound checking. #[inline] pub unsafe fn new_unchecked(storage: $SRef, start: (usize, usize), shape: (R, C)) @@ -89,12 +89,12 @@ slice_storage_impl!("A mutable matrix data storage for mutable matrix slice. 
Onl StorageMut as &'a mut S; SliceStorageMut.get_address_unchecked_mut(*mut N as &'a mut N) ); -impl<'a, N: Scalar + Clone, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Copy +impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Copy for SliceStorage<'a, N, R, C, RStride, CStride> { } -impl<'a, N: Scalar + Clone, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Clone +impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Clone for SliceStorage<'a, N, R, C, RStride, CStride> { #[inline] @@ -110,7 +110,7 @@ impl<'a, N: Scalar + Clone, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Clone macro_rules! storage_impl( ($($T: ident),* $(,)*) => {$( - unsafe impl<'a, N: Scalar + Clone, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Storage + unsafe impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Storage for $T<'a, N, R, C, RStride, CStride> { type RStride = RStride; @@ -178,7 +178,7 @@ macro_rules! storage_impl( storage_impl!(SliceStorage, SliceStorageMut); -unsafe impl<'a, N: Scalar + Clone, R: Dim, C: Dim, RStride: Dim, CStride: Dim> StorageMut +unsafe impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> StorageMut for SliceStorageMut<'a, N, R, C, RStride, CStride> { #[inline] @@ -198,15 +198,15 @@ unsafe impl<'a, N: Scalar + Clone, R: Dim, C: Dim, RStride: Dim, CStride: Dim> S } } -unsafe impl<'a, N: Scalar + Clone, R: Dim, CStride: Dim> ContiguousStorage for SliceStorage<'a, N, R, U1, U1, CStride> { } -unsafe impl<'a, N: Scalar + Clone, R: Dim, CStride: Dim> ContiguousStorage for SliceStorageMut<'a, N, R, U1, U1, CStride> { } -unsafe impl<'a, N: Scalar + Clone, R: Dim, CStride: Dim> ContiguousStorageMut for SliceStorageMut<'a, N, R, U1, U1, CStride> { } +unsafe impl<'a, N: Scalar, R: Dim, CStride: Dim> ContiguousStorage for SliceStorage<'a, N, R, U1, U1, CStride> { } +unsafe impl<'a, N: Scalar, R: Dim, CStride: Dim> ContiguousStorage for SliceStorageMut<'a, N, R, U1, U1, CStride> { } +unsafe impl<'a, N: Scalar, R: Dim, CStride: Dim> 
ContiguousStorageMut for SliceStorageMut<'a, N, R, U1, U1, CStride> { } -unsafe impl<'a, N: Scalar + Clone, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorage for SliceStorage<'a, N, R, C, U1, R> { } -unsafe impl<'a, N: Scalar + Clone, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorage for SliceStorageMut<'a, N, R, C, U1, R> { } -unsafe impl<'a, N: Scalar + Clone, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorageMut for SliceStorageMut<'a, N, R, C, U1, R> { } +unsafe impl<'a, N: Scalar, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorage for SliceStorage<'a, N, R, C, U1, R> { } +unsafe impl<'a, N: Scalar, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorage for SliceStorageMut<'a, N, R, C, U1, R> { } +unsafe impl<'a, N: Scalar, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorageMut for SliceStorageMut<'a, N, R, C, U1, R> { } -impl> Matrix { +impl> Matrix { #[inline] fn assert_slice_index( &self, @@ -261,7 +261,7 @@ macro_rules! matrix_slice_impl( pub type $MatrixSlice<'a, N, R, C, RStride, CStride> = Matrix>; - impl> Matrix { + impl> Matrix { /* * * Row slicing. @@ -786,7 +786,7 @@ impl SliceRange for RangeFull { } } -impl> Matrix { +impl> Matrix { /// Slices a sub-matrix containing the rows indexed by the range `rows` and the columns indexed /// by the range `cols`. #[inline] @@ -827,7 +827,7 @@ impl> Matrix } } -impl> Matrix { +impl> Matrix { /// Slices a mutable sub-matrix containing the rows indexed by the range `rows` and the columns /// indexed by the range `cols`. pub fn slice_range_mut( @@ -871,7 +871,7 @@ impl> Matrix From> for MatrixSlice<'a, N, R, C, RStride, CStride> where - N: Scalar + Clone, + N: Scalar, R: Dim, C: Dim, RStride: Dim, diff --git a/src/base/ops.rs b/src/base/ops.rs index 9f785daa..b20cc74f 100644 --- a/src/base/ops.rs +++ b/src/base/ops.rs @@ -20,7 +20,7 @@ use crate::base::{DefaultAllocator, Matrix, MatrixMN, MatrixN, MatrixSum, Scalar * Indexing. 
* */ -impl> Index for Matrix { +impl> Index for Matrix { type Output = N; #[inline] @@ -32,7 +32,7 @@ impl> Index for Ma impl Index<(usize, usize)> for Matrix where - N: Scalar + Clone, + N: Scalar, S: Storage, { type Output = N; @@ -50,7 +50,7 @@ where } // Mutable versions. -impl> IndexMut for Matrix { +impl> IndexMut for Matrix { #[inline] fn index_mut(&mut self, i: usize) -> &mut N { let ij = self.vector_to_matrix_index(i); @@ -60,7 +60,7 @@ impl> IndexMut impl IndexMut<(usize, usize)> for Matrix where - N: Scalar + Clone, + N: Scalar, S: StorageMut, { #[inline] @@ -82,7 +82,7 @@ where */ impl Neg for Matrix where - N: Scalar + Clone + ClosedNeg, + N: Scalar + ClosedNeg, S: Storage, DefaultAllocator: Allocator, { @@ -98,7 +98,7 @@ where impl<'a, N, R: Dim, C: Dim, S> Neg for &'a Matrix where - N: Scalar + Clone + ClosedNeg, + N: Scalar + ClosedNeg, S: Storage, DefaultAllocator: Allocator, { @@ -112,7 +112,7 @@ where impl Matrix where - N: Scalar + Clone + ClosedNeg, + N: Scalar + ClosedNeg, S: StorageMut, { /// Negates `self` in-place. @@ -137,7 +137,7 @@ macro_rules! componentwise_binop_impl( $method_to: ident, $method_to_statically_unchecked: ident) => { impl> Matrix - where N: Scalar + Clone + $bound { + where N: Scalar + $bound { /* * @@ -267,7 +267,7 @@ macro_rules! componentwise_binop_impl( impl<'b, N, R1, C1, R2, C2, SA, SB> $Trait<&'b Matrix> for Matrix where R1: Dim, C1: Dim, R2: Dim, C2: Dim, - N: Scalar + Clone + $bound, + N: Scalar + $bound, SA: Storage, SB: Storage, DefaultAllocator: SameShapeAllocator, @@ -285,7 +285,7 @@ macro_rules! componentwise_binop_impl( impl<'a, N, R1, C1, R2, C2, SA, SB> $Trait> for &'a Matrix where R1: Dim, C1: Dim, R2: Dim, C2: Dim, - N: Scalar + Clone + $bound, + N: Scalar + $bound, SA: Storage, SB: Storage, DefaultAllocator: SameShapeAllocator, @@ -303,7 +303,7 @@ macro_rules! 
componentwise_binop_impl( impl $Trait> for Matrix where R1: Dim, C1: Dim, R2: Dim, C2: Dim, - N: Scalar + Clone + $bound, + N: Scalar + $bound, SA: Storage, SB: Storage, DefaultAllocator: SameShapeAllocator, @@ -318,7 +318,7 @@ macro_rules! componentwise_binop_impl( impl<'a, 'b, N, R1, C1, R2, C2, SA, SB> $Trait<&'b Matrix> for &'a Matrix where R1: Dim, C1: Dim, R2: Dim, C2: Dim, - N: Scalar + Clone + $bound, + N: Scalar + $bound, SA: Storage, SB: Storage, DefaultAllocator: SameShapeAllocator, @@ -341,7 +341,7 @@ macro_rules! componentwise_binop_impl( impl<'b, N, R1, C1, R2, C2, SA, SB> $TraitAssign<&'b Matrix> for Matrix where R1: Dim, C1: Dim, R2: Dim, C2: Dim, - N: Scalar + Clone + $bound, + N: Scalar + $bound, SA: StorageMut, SB: Storage, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns { @@ -354,7 +354,7 @@ macro_rules! componentwise_binop_impl( impl $TraitAssign> for Matrix where R1: Dim, C1: Dim, R2: Dim, C2: Dim, - N: Scalar + Clone + $bound, + N: Scalar + $bound, SA: StorageMut, SB: Storage, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns { @@ -376,7 +376,7 @@ componentwise_binop_impl!(Sub, sub, ClosedSub; impl iter::Sum for MatrixMN where - N: Scalar + Clone + ClosedAdd + Zero, + N: Scalar + ClosedAdd + Zero, DefaultAllocator: Allocator, { fn sum>>(iter: I) -> MatrixMN { @@ -386,7 +386,7 @@ where impl iter::Sum for MatrixMN where - N: Scalar + Clone + ClosedAdd + Zero, + N: Scalar + ClosedAdd + Zero, DefaultAllocator: Allocator, { /// # Example @@ -416,7 +416,7 @@ where impl<'a, N, R: DimName, C: DimName> iter::Sum<&'a MatrixMN> for MatrixMN where - N: Scalar + Clone + ClosedAdd + Zero, + N: Scalar + ClosedAdd + Zero, DefaultAllocator: Allocator, { fn sum>>(iter: I) -> MatrixMN { @@ -426,7 +426,7 @@ where impl<'a, N, C: Dim> iter::Sum<&'a MatrixMN> for MatrixMN where - N: Scalar + Clone + ClosedAdd + Zero, + N: Scalar + ClosedAdd + Zero, DefaultAllocator: Allocator, { /// # Example @@ -466,7 +466,7 @@ macro_rules! 
componentwise_scalarop_impl( ($Trait: ident, $method: ident, $bound: ident; $TraitAssign: ident, $method_assign: ident) => { impl $Trait for Matrix - where N: Scalar + Clone + $bound, + where N: Scalar + $bound, S: Storage, DefaultAllocator: Allocator { type Output = MatrixMN; @@ -490,7 +490,7 @@ macro_rules! componentwise_scalarop_impl( } impl<'a, N, R: Dim, C: Dim, S> $Trait for &'a Matrix - where N: Scalar + Clone + $bound, + where N: Scalar + $bound, S: Storage, DefaultAllocator: Allocator { type Output = MatrixMN; @@ -502,7 +502,7 @@ macro_rules! componentwise_scalarop_impl( } impl $TraitAssign for Matrix - where N: Scalar + Clone + $bound, + where N: Scalar + $bound, S: StorageMut { #[inline] fn $method_assign(&mut self, rhs: N) { @@ -561,7 +561,7 @@ left_scalar_mul_impl!(u8, u16, u32, u64, usize, i8, i16, i32, i64, isize, f32, f impl<'a, 'b, N, R1: Dim, C1: Dim, R2: Dim, C2: Dim, SA, SB> Mul<&'b Matrix> for &'a Matrix where - N: Scalar + Clone + Zero + One + ClosedAdd + ClosedMul, + N: Scalar + Zero + One + ClosedAdd + ClosedMul, SA: Storage, SB: Storage, DefaultAllocator: Allocator, @@ -582,7 +582,7 @@ where impl<'a, N, R1: Dim, C1: Dim, R2: Dim, C2: Dim, SA, SB> Mul> for &'a Matrix where - N: Scalar + Clone + Zero + One + ClosedAdd + ClosedMul, + N: Scalar + Zero + One + ClosedAdd + ClosedMul, SB: Storage, SA: Storage, DefaultAllocator: Allocator, @@ -599,7 +599,7 @@ where impl<'b, N, R1: Dim, C1: Dim, R2: Dim, C2: Dim, SA, SB> Mul<&'b Matrix> for Matrix where - N: Scalar + Clone + Zero + One + ClosedAdd + ClosedMul, + N: Scalar + Zero + One + ClosedAdd + ClosedMul, SB: Storage, SA: Storage, DefaultAllocator: Allocator, @@ -616,7 +616,7 @@ where impl Mul> for Matrix where - N: Scalar + Clone + Zero + One + ClosedAdd + ClosedMul, + N: Scalar + Zero + One + ClosedAdd + ClosedMul, SB: Storage, SA: Storage, DefaultAllocator: Allocator, @@ -638,7 +638,7 @@ where R1: Dim, C1: Dim, R2: Dim, - N: Scalar + Clone + Zero + One + ClosedAdd + ClosedMul, + N: Scalar + 
Zero + One + ClosedAdd + ClosedMul, SB: Storage, SA: ContiguousStorageMut + Clone, ShapeConstraint: AreMultipliable, @@ -655,7 +655,7 @@ where R1: Dim, C1: Dim, R2: Dim, - N: Scalar + Clone + Zero + One + ClosedAdd + ClosedMul, + N: Scalar + Zero + One + ClosedAdd + ClosedMul, SB: Storage, SA: ContiguousStorageMut + Clone, ShapeConstraint: AreMultipliable, @@ -671,7 +671,7 @@ where // Transpose-multiplication. impl Matrix where - N: Scalar + Clone + Zero + One + ClosedAdd + ClosedMul, + N: Scalar + Zero + One + ClosedAdd + ClosedMul, SA: Storage, { /// Equivalent to `self.transpose() * rhs`. @@ -826,7 +826,7 @@ where } } -impl> Matrix { +impl> Matrix { /// Adds a scalar to `self`. #[inline] pub fn add_scalar(&self, rhs: N) -> MatrixMN @@ -848,7 +848,7 @@ impl> Matrix< impl iter::Product for MatrixN where - N: Scalar + Clone + Zero + One + ClosedMul + ClosedAdd, + N: Scalar + Zero + One + ClosedMul + ClosedAdd, DefaultAllocator: Allocator, { fn product>>(iter: I) -> MatrixN { @@ -858,7 +858,7 @@ where impl<'a, N, D: DimName> iter::Product<&'a MatrixN> for MatrixN where - N: Scalar + Clone + Zero + One + ClosedMul + ClosedAdd, + N: Scalar + Zero + One + ClosedMul + ClosedAdd, DefaultAllocator: Allocator, { fn product>>(iter: I) -> MatrixN { @@ -866,7 +866,7 @@ where } } -impl> Matrix { +impl> Matrix { #[inline(always)] fn xcmp(&self, abs: impl Fn(N) -> N2, ordering: Ordering) -> N2 where N2: Scalar + PartialOrd + Zero { diff --git a/src/base/properties.rs b/src/base/properties.rs index 74ddf8cf..8ca49568 100644 --- a/src/base/properties.rs +++ b/src/base/properties.rs @@ -9,7 +9,7 @@ use crate::base::dimension::{Dim, DimMin}; use crate::base::storage::Storage; use crate::base::{DefaultAllocator, Matrix, Scalar, SquareMatrix}; -impl> Matrix { +impl> Matrix { /// Indicates if this is an empty matrix. 
#[inline] pub fn is_empty(&self) -> bool { diff --git a/src/base/scalar.rs b/src/base/scalar.rs index 070dc0a7..a8008ddf 100644 --- a/src/base/scalar.rs +++ b/src/base/scalar.rs @@ -5,7 +5,7 @@ use std::fmt::Debug; /// The basic scalar type for all structures of `nalgebra`. /// /// This does not make any assumption on the algebraic properties of `Self`. -pub trait Scalar: PartialEq + Debug + Any { +pub trait Scalar: Clone + PartialEq + Debug + Any { #[inline] /// Tests if `Self` the same as the type `T` /// @@ -16,9 +16,9 @@ pub trait Scalar: PartialEq + Debug + Any { #[inline(always)] /// Performance hack: Clone doesn't get inlined for Copy types in debug mode, so make it inline anyway. - /// - /// Downstream crates need to implement this on any Clone Scalars, as a blanket impl would conflict with with the blanket Copy impl. - fn inlined_clone(&self) -> Self; + fn inlined_clone(&self) -> Self { + self.clone() + } } impl Scalar for T { diff --git a/src/base/statistics.rs b/src/base/statistics.rs index 8a355c87..cb597f31 100644 --- a/src/base/statistics.rs +++ b/src/base/statistics.rs @@ -3,7 +3,7 @@ use alga::general::{Field, SupersetOf}; use crate::storage::Storage; use crate::allocator::Allocator; -impl> Matrix { +impl> Matrix { /// Returns a row vector where each element is the result of the application of `f` on the /// corresponding column of the original matrix. #[inline] @@ -54,7 +54,7 @@ impl> Matrix } } -impl, R: Dim, C: Dim, S: Storage> Matrix { +impl, R: Dim, C: Dim, S: Storage> Matrix { /* * * Sum computation. 
diff --git a/src/base/storage.rs b/src/base/storage.rs index f1b0177b..e7439552 100644 --- a/src/base/storage.rs +++ b/src/base/storage.rs @@ -36,7 +36,7 @@ pub type CStride = /// should **not** allow the user to modify the size of the underlying buffer with safe methods /// (for example the `VecStorage::data_mut` method is unsafe because the user could change the /// vector's size so that it no longer contains enough elements: this will lead to UB. -pub unsafe trait Storage: Debug + Sized { +pub unsafe trait Storage: Debug + Sized { /// The static stride of this storage's rows. type RStride: Dim; @@ -117,7 +117,7 @@ pub unsafe trait Storage: Debug + Sized /// Note that a mutable access does not mean that the matrix owns its data. For example, a mutable /// matrix slice can provide mutable access to its elements even if it does not own its data (it /// contains only an internal reference to them). -pub unsafe trait StorageMut: Storage { +pub unsafe trait StorageMut: Storage { /// The matrix mutable data pointer. fn ptr_mut(&mut self) -> *mut N; @@ -175,7 +175,7 @@ pub unsafe trait StorageMut: Storage: +pub unsafe trait ContiguousStorage: Storage { } @@ -185,7 +185,7 @@ pub unsafe trait ContiguousStorage: /// The storage requirement means that for any value of `i` in `[0, nrows * ncols[`, the value /// `.get_unchecked_linear` returns one of the matrix component. This trait is unsafe because /// failing to comply to this may cause Undefined Behaviors. -pub unsafe trait ContiguousStorageMut: +pub unsafe trait ContiguousStorageMut: ContiguousStorage + StorageMut { } diff --git a/src/base/swizzle.rs b/src/base/swizzle.rs index ec33f8da..02e48834 100644 --- a/src/base/swizzle.rs +++ b/src/base/swizzle.rs @@ -5,7 +5,7 @@ use typenum::{self, Cmp, Greater}; macro_rules! 
impl_swizzle { ($( where $BaseDim: ident: $( $name: ident() -> $Result: ident[$($i: expr),+] ),+ ;)* ) => { $( - impl> Vector + impl> Vector where D::Value: Cmp { $( diff --git a/src/base/vec_storage.rs b/src/base/vec_storage.rs index ffb1ac0c..e0c092fb 100644 --- a/src/base/vec_storage.rs +++ b/src/base/vec_storage.rs @@ -102,7 +102,7 @@ impl Into> for VecStorage * Dynamic − Dynamic * */ -unsafe impl Storage for VecStorage +unsafe impl Storage for VecStorage where DefaultAllocator: Allocator { type RStride = U1; @@ -146,7 +146,7 @@ where DefaultAllocator: Allocator } } -unsafe impl Storage for VecStorage +unsafe impl Storage for VecStorage where DefaultAllocator: Allocator { type RStride = U1; @@ -195,7 +195,7 @@ where DefaultAllocator: Allocator * StorageMut, ContiguousStorage. * */ -unsafe impl StorageMut for VecStorage +unsafe impl StorageMut for VecStorage where DefaultAllocator: Allocator { #[inline] @@ -209,13 +209,13 @@ where DefaultAllocator: Allocator } } -unsafe impl ContiguousStorage for VecStorage where DefaultAllocator: Allocator +unsafe impl ContiguousStorage for VecStorage where DefaultAllocator: Allocator {} -unsafe impl ContiguousStorageMut for VecStorage where DefaultAllocator: Allocator +unsafe impl ContiguousStorageMut for VecStorage where DefaultAllocator: Allocator {} -unsafe impl StorageMut for VecStorage +unsafe impl StorageMut for VecStorage where DefaultAllocator: Allocator { #[inline] @@ -244,10 +244,10 @@ impl Abomonation for VecStorage { } } -unsafe impl ContiguousStorage for VecStorage where DefaultAllocator: Allocator +unsafe impl ContiguousStorage for VecStorage where DefaultAllocator: Allocator {} -unsafe impl ContiguousStorageMut for VecStorage where DefaultAllocator: Allocator +unsafe impl ContiguousStorageMut for VecStorage where DefaultAllocator: Allocator {} impl Extend for VecStorage @@ -270,7 +270,7 @@ impl Extend for VecStorage impl Extend> for VecStorage where - N: Scalar + Clone, + N: Scalar, R: Dim, RV: Dim, SV: Storage, 
diff --git a/src/debug/random_orthogonal.rs b/src/debug/random_orthogonal.rs index 8b9321f2..421b041a 100644 --- a/src/debug/random_orthogonal.rs +++ b/src/debug/random_orthogonal.rs @@ -12,7 +12,7 @@ use crate::linalg::givens::GivensRotation; /// A random orthogonal matrix. #[derive(Clone, Debug)] -pub struct RandomOrthogonal +pub struct RandomOrthogonal where DefaultAllocator: Allocator { m: MatrixN, diff --git a/src/debug/random_sdp.rs b/src/debug/random_sdp.rs index 503d8689..47e3ca60 100644 --- a/src/debug/random_sdp.rs +++ b/src/debug/random_sdp.rs @@ -13,7 +13,7 @@ use crate::debug::RandomOrthogonal; /// A random, well-conditioned, symmetric definite-positive matrix. #[derive(Clone, Debug)] -pub struct RandomSDP +pub struct RandomSDP where DefaultAllocator: Allocator { m: MatrixN, diff --git a/src/geometry/op_macros.rs b/src/geometry/op_macros.rs index 2b12a8c9..382afe06 100644 --- a/src/geometry/op_macros.rs +++ b/src/geometry/op_macros.rs @@ -18,7 +18,7 @@ macro_rules! md_impl( // Lifetime. $($lives: tt),*) => { impl<$($lives ,)* N $(, $Dims: $DimsBound $(<$($BoundParam),*>)*)*> $Op<$Rhs> for $Lhs - where N: Scalar + Clone + Zero + One + ClosedAdd + ClosedMul $($(+ $ScalarBounds)*)*, + where N: Scalar + Zero + One + ClosedAdd + ClosedMul $($(+ $ScalarBounds)*)*, DefaultAllocator: Allocator + Allocator + Allocator, @@ -96,7 +96,7 @@ macro_rules! md_assign_impl( // Actual implementation and lifetimes. $action: expr; $($lives: tt),*) => { impl<$($lives ,)* N $(, $Dims: $DimsBound $(<$($BoundParam),*>)*)*> $Op<$Rhs> for $Lhs - where N: Scalar + Clone + Zero + One + ClosedAdd + ClosedMul $($(+ $ScalarBounds)*)*, + where N: Scalar + Zero + One + ClosedAdd + ClosedMul $($(+ $ScalarBounds)*)*, DefaultAllocator: Allocator + Allocator, $( $ConstraintType: $ConstraintBound $(<$( $ConstraintBoundParams $( = $EqBound )*),*>)* ),* @@ -148,7 +148,7 @@ macro_rules! 
add_sub_impl( $lhs: ident: $Lhs: ty, $rhs: ident: $Rhs: ty, Output = $Result: ty; $action: expr; $($lives: tt),*) => { impl<$($lives ,)* N $(, $Dims: $DimsBound $(<$($BoundParam),*>)*)*> $Op<$Rhs> for $Lhs - where N: Scalar + Clone + $bound, + where N: Scalar + $bound, DefaultAllocator: Allocator + Allocator + SameShapeAllocator, @@ -172,7 +172,7 @@ macro_rules! add_sub_assign_impl( $lhs: ident: $Lhs: ty, $rhs: ident: $Rhs: ty; $action: expr; $($lives: tt),*) => { impl<$($lives ,)* N $(, $Dims: $DimsBound)*> $Op<$Rhs> for $Lhs - where N: Scalar + Clone + $bound, + where N: Scalar + $bound, DefaultAllocator: Allocator + Allocator, ShapeConstraint: SameNumberOfRows<$R1, $R2> + SameNumberOfColumns<$C1, $C2> { diff --git a/src/geometry/perspective.rs b/src/geometry/perspective.rs index ec4575fa..8020c0cf 100644 --- a/src/geometry/perspective.rs +++ b/src/geometry/perspective.rs @@ -18,7 +18,7 @@ use crate::base::{Matrix4, Scalar, Vector, Vector3}; use crate::geometry::{Point3, Projective3}; /// A 3D perspective projection stored as an homogeneous 4x4 matrix. -pub struct Perspective3 { +pub struct Perspective3 { matrix: Matrix4, } diff --git a/src/geometry/point.rs b/src/geometry/point.rs index 6911db78..bc1b138a 100644 --- a/src/geometry/point.rs +++ b/src/geometry/point.rs @@ -20,14 +20,14 @@ use crate::base::{DefaultAllocator, Scalar, VectorN}; /// A point in a n-dimensional euclidean space. #[repr(C)] #[derive(Debug, Clone)] -pub struct Point +pub struct Point where DefaultAllocator: Allocator { /// The coordinates of this point, i.e., the shift from the origin. 
pub coords: VectorN, } -impl hash::Hash for Point +impl hash::Hash for Point where DefaultAllocator: Allocator, >::Buffer: hash::Hash, @@ -45,7 +45,7 @@ where } #[cfg(feature = "serde-serialize")] -impl Serialize for Point +impl Serialize for Point where DefaultAllocator: Allocator, >::Buffer: Serialize, @@ -57,7 +57,7 @@ where } #[cfg(feature = "serde-serialize")] -impl<'a, N: Scalar + Clone, D: DimName> Deserialize<'a> for Point +impl<'a, N: Scalar, D: DimName> Deserialize<'a> for Point where DefaultAllocator: Allocator, >::Buffer: Deserialize<'a>, @@ -73,7 +73,7 @@ where #[cfg(feature = "abomonation-serialize")] impl Abomonation for Point where - N: Scalar + Clone, + N: Scalar, D: DimName, VectorN: Abomonation, DefaultAllocator: Allocator, @@ -91,7 +91,7 @@ where } } -impl Point +impl Point where DefaultAllocator: Allocator { /// Converts this point into a vector in homogeneous coordinates, i.e., appends a `1` at the @@ -210,7 +210,7 @@ where DefaultAllocator: Allocator } } -impl AbsDiffEq for Point +impl AbsDiffEq for Point where DefaultAllocator: Allocator, N::Epsilon: Copy, @@ -228,7 +228,7 @@ where } } -impl RelativeEq for Point +impl RelativeEq for Point where DefaultAllocator: Allocator, N::Epsilon: Copy, @@ -251,7 +251,7 @@ where } } -impl UlpsEq for Point +impl UlpsEq for Point where DefaultAllocator: Allocator, N::Epsilon: Copy, @@ -267,9 +267,9 @@ where } } -impl Eq for Point where DefaultAllocator: Allocator {} +impl Eq for Point where DefaultAllocator: Allocator {} -impl PartialEq for Point +impl PartialEq for Point where DefaultAllocator: Allocator { #[inline] @@ -278,7 +278,7 @@ where DefaultAllocator: Allocator } } -impl PartialOrd for Point +impl PartialOrd for Point where DefaultAllocator: Allocator { #[inline] @@ -312,7 +312,7 @@ where DefaultAllocator: Allocator * Display * */ -impl fmt::Display for Point +impl fmt::Display for Point where DefaultAllocator: Allocator { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { diff --git 
a/src/geometry/point_alga.rs b/src/geometry/point_alga.rs index b5ae46b8..162e6c68 100644 --- a/src/geometry/point_alga.rs +++ b/src/geometry/point_alga.rs @@ -7,9 +7,9 @@ use crate::base::{DefaultAllocator, Scalar, VectorN}; use crate::geometry::Point; -impl AffineSpace for Point +impl AffineSpace for Point where - N: Scalar + Clone + Field, + N: Scalar + Field, DefaultAllocator: Allocator, { type Translation = VectorN; @@ -49,7 +49,7 @@ where DefaultAllocator: Allocator */ impl MeetSemilattice for Point where - N: Scalar + Clone + MeetSemilattice, + N: Scalar + MeetSemilattice, DefaultAllocator: Allocator, { #[inline] @@ -60,7 +60,7 @@ where impl JoinSemilattice for Point where - N: Scalar + Clone + JoinSemilattice, + N: Scalar + JoinSemilattice, DefaultAllocator: Allocator, { #[inline] @@ -71,7 +71,7 @@ where impl Lattice for Point where - N: Scalar + Clone + Lattice, + N: Scalar + Lattice, DefaultAllocator: Allocator, { #[inline] diff --git a/src/geometry/point_construction.rs b/src/geometry/point_construction.rs index 5ba9bc2e..e5d2ee44 100644 --- a/src/geometry/point_construction.rs +++ b/src/geometry/point_construction.rs @@ -12,7 +12,7 @@ use crate::base::{DefaultAllocator, Scalar, VectorN}; use crate::geometry::Point; -impl Point +impl Point where DefaultAllocator: Allocator { /// Creates a new point with uninitialized coordinates. @@ -94,7 +94,7 @@ where DefaultAllocator: Allocator #[inline] pub fn from_homogeneous(v: VectorN>) -> Option where - N: Scalar + Clone + Zero + One + ClosedDiv, + N: Scalar + Zero + One + ClosedDiv, D: DimNameAdd, DefaultAllocator: Allocator>, { @@ -112,7 +112,7 @@ where DefaultAllocator: Allocator * Traits that build points. 
* */ -impl Bounded for Point +impl Bounded for Point where DefaultAllocator: Allocator { #[inline] @@ -126,7 +126,7 @@ where DefaultAllocator: Allocator } } -impl Distribution> for Standard +impl Distribution> for Standard where DefaultAllocator: Allocator, Standard: Distribution, @@ -138,7 +138,7 @@ where } #[cfg(feature = "arbitrary")] -impl Arbitrary for Point +impl Arbitrary for Point where DefaultAllocator: Allocator, >::Buffer: Send, @@ -156,7 +156,7 @@ where */ macro_rules! componentwise_constructors_impl( ($($doc: expr; $D: ty, $($args: ident:$irow: expr),*);* $(;)*) => {$( - impl Point + impl Point where DefaultAllocator: Allocator { #[doc = "Initializes this point from its components."] #[doc = "# Example\n```"] @@ -192,7 +192,7 @@ componentwise_constructors_impl!( macro_rules! from_array_impl( ($($D: ty, $len: expr);*) => {$( - impl From<[N; $len]> for Point { + impl From<[N; $len]> for Point { fn from (coords: [N; $len]) -> Self { Self { coords: coords.into() diff --git a/src/geometry/point_conversion.rs b/src/geometry/point_conversion.rs index d2c5a8c0..4f32d840 100644 --- a/src/geometry/point_conversion.rs +++ b/src/geometry/point_conversion.rs @@ -27,8 +27,8 @@ use std::convert::{AsMut, AsRef, From, Into}; impl SubsetOf> for Point where D: DimName, - N1: Scalar + Clone, - N2: Scalar + Clone + SupersetOf, + N1: Scalar, + N2: Scalar + SupersetOf, DefaultAllocator: Allocator + Allocator, { #[inline] @@ -52,8 +52,8 @@ where impl SubsetOf>> for Point where D: DimNameAdd, - N1: Scalar + Clone, - N2: Scalar + Clone + Zero + One + ClosedDiv + SupersetOf, + N1: Scalar, + N2: Scalar + Zero + One + ClosedDiv + SupersetOf, DefaultAllocator: Allocator + Allocator> + Allocator> @@ -83,7 +83,7 @@ where macro_rules! impl_from_into_mint_1D( ($($NRows: ident => $PT:ident, $VT:ident [$SZ: expr]);* $(;)*) => {$( impl From> for Point - where N: Scalar + Clone { + where N: Scalar { #[inline] fn from(p: mint::$PT) -> Self { Self { @@ -93,7 +93,7 @@ macro_rules! 
impl_from_into_mint_1D( } impl Into> for Point - where N: Scalar + Clone { + where N: Scalar { #[inline] fn into(self) -> mint::$PT { let mint_vec: mint::$VT = self.coords.into(); @@ -102,7 +102,7 @@ macro_rules! impl_from_into_mint_1D( } impl AsRef> for Point - where N: Scalar + Clone { + where N: Scalar { #[inline] fn as_ref(&self) -> &mint::$PT { unsafe { @@ -112,7 +112,7 @@ macro_rules! impl_from_into_mint_1D( } impl AsMut> for Point - where N: Scalar + Clone { + where N: Scalar { #[inline] fn as_mut(&mut self) -> &mut mint::$PT { unsafe { @@ -130,7 +130,7 @@ impl_from_into_mint_1D!( U3 => Point3, Vector3[3]; ); -impl From> for VectorN> +impl From> for VectorN> where D: DimNameAdd, DefaultAllocator: Allocator + Allocator>, @@ -141,7 +141,7 @@ where } } -impl From> for Point +impl From> for Point where DefaultAllocator: Allocator, { diff --git a/src/geometry/point_coordinates.rs b/src/geometry/point_coordinates.rs index 0ccc9441..b56e120e 100644 --- a/src/geometry/point_coordinates.rs +++ b/src/geometry/point_coordinates.rs @@ -16,7 +16,7 @@ use crate::geometry::Point; macro_rules! deref_impl( ($D: ty, $Target: ident $(, $comps: ident)*) => { - impl Deref for Point + impl Deref for Point where DefaultAllocator: Allocator { type Target = $Target; @@ -26,7 +26,7 @@ macro_rules! deref_impl( } } - impl DerefMut for Point + impl DerefMut for Point where DefaultAllocator: Allocator { #[inline] fn deref_mut(&mut self) -> &mut Self::Target { diff --git a/src/geometry/point_ops.rs b/src/geometry/point_ops.rs index 4f08f526..648a71a6 100644 --- a/src/geometry/point_ops.rs +++ b/src/geometry/point_ops.rs @@ -18,7 +18,7 @@ use crate::geometry::Point; * Indexing. * */ -impl Index for Point +impl Index for Point where DefaultAllocator: Allocator { type Output = N; @@ -29,7 +29,7 @@ where DefaultAllocator: Allocator } } -impl IndexMut for Point +impl IndexMut for Point where DefaultAllocator: Allocator { #[inline] @@ -43,7 +43,7 @@ where DefaultAllocator: Allocator * Neg. 
* */ -impl Neg for Point +impl Neg for Point where DefaultAllocator: Allocator { type Output = Self; @@ -54,7 +54,7 @@ where DefaultAllocator: Allocator } } -impl<'a, N: Scalar + Clone + ClosedNeg, D: DimName> Neg for &'a Point +impl<'a, N: Scalar + ClosedNeg, D: DimName> Neg for &'a Point where DefaultAllocator: Allocator { type Output = Point; @@ -138,7 +138,7 @@ add_sub_impl!(Add, add, ClosedAdd; macro_rules! op_assign_impl( ($($TraitAssign: ident, $method_assign: ident, $bound: ident);* $(;)*) => {$( impl<'b, N, D1: DimName, D2: Dim, SB> $TraitAssign<&'b Vector> for Point - where N: Scalar + Clone + $bound, + where N: Scalar + $bound, SB: Storage, DefaultAllocator: Allocator, ShapeConstraint: SameNumberOfRows { @@ -150,7 +150,7 @@ macro_rules! op_assign_impl( } impl $TraitAssign> for Point - where N: Scalar + Clone + $bound, + where N: Scalar + $bound, SB: Storage, DefaultAllocator: Allocator, ShapeConstraint: SameNumberOfRows { @@ -192,7 +192,7 @@ md_impl_all!( macro_rules! componentwise_scalarop_impl( ($Trait: ident, $method: ident, $bound: ident; $TraitAssign: ident, $method_assign: ident) => { - impl $Trait for Point + impl $Trait for Point where DefaultAllocator: Allocator { type Output = Point; @@ -202,7 +202,7 @@ macro_rules! componentwise_scalarop_impl( } } - impl<'a, N: Scalar + Clone + $bound, D: DimName> $Trait for &'a Point + impl<'a, N: Scalar + $bound, D: DimName> $Trait for &'a Point where DefaultAllocator: Allocator { type Output = Point; @@ -212,7 +212,7 @@ macro_rules! componentwise_scalarop_impl( } } - impl $TraitAssign for Point + impl $TraitAssign for Point where DefaultAllocator: Allocator { #[inline] fn $method_assign(&mut self, right: N) { diff --git a/src/geometry/reflection.rs b/src/geometry/reflection.rs index 585fe57b..b4658a11 100644 --- a/src/geometry/reflection.rs +++ b/src/geometry/reflection.rs @@ -8,7 +8,7 @@ use crate::storage::{Storage, StorageMut}; use crate::geometry::Point; /// A reflection wrt. a plane. 
-pub struct Reflection> { +pub struct Reflection> { axis: Vector, bias: N, } diff --git a/src/geometry/rotation.rs b/src/geometry/rotation.rs index 9ee8511a..afbeeb3f 100755 --- a/src/geometry/rotation.rs +++ b/src/geometry/rotation.rs @@ -24,13 +24,13 @@ use crate::geometry::Point; /// A rotation matrix. #[repr(C)] #[derive(Debug)] -pub struct Rotation +pub struct Rotation where DefaultAllocator: Allocator { matrix: MatrixN, } -impl hash::Hash for Rotation +impl hash::Hash for Rotation where DefaultAllocator: Allocator, >::Buffer: hash::Hash, @@ -47,7 +47,7 @@ where { } -impl Clone for Rotation +impl Clone for Rotation where DefaultAllocator: Allocator, >::Buffer: Clone, @@ -61,7 +61,7 @@ where #[cfg(feature = "abomonation-serialize")] impl Abomonation for Rotation where - N: Scalar + Clone, + N: Scalar, D: DimName, MatrixN: Abomonation, DefaultAllocator: Allocator, @@ -80,7 +80,7 @@ where } #[cfg(feature = "serde-serialize")] -impl Serialize for Rotation +impl Serialize for Rotation where DefaultAllocator: Allocator, Owned: Serialize, @@ -92,7 +92,7 @@ where } #[cfg(feature = "serde-serialize")] -impl<'a, N: Scalar + Clone, D: DimName> Deserialize<'a> for Rotation +impl<'a, N: Scalar, D: DimName> Deserialize<'a> for Rotation where DefaultAllocator: Allocator, Owned: Deserialize<'a>, @@ -105,7 +105,7 @@ where } } -impl Rotation +impl Rotation where DefaultAllocator: Allocator { /// A reference to the underlying matrix representation of this rotation. 
@@ -432,9 +432,9 @@ where DefaultAllocator: Allocator + Allocator } } -impl Eq for Rotation where DefaultAllocator: Allocator {} +impl Eq for Rotation where DefaultAllocator: Allocator {} -impl PartialEq for Rotation +impl PartialEq for Rotation where DefaultAllocator: Allocator { #[inline] @@ -445,7 +445,7 @@ where DefaultAllocator: Allocator impl AbsDiffEq for Rotation where - N: Scalar + Clone + AbsDiffEq, + N: Scalar + AbsDiffEq, DefaultAllocator: Allocator, N::Epsilon: Copy, { @@ -464,7 +464,7 @@ where impl RelativeEq for Rotation where - N: Scalar + Clone + RelativeEq, + N: Scalar + RelativeEq, DefaultAllocator: Allocator, N::Epsilon: Copy, { @@ -488,7 +488,7 @@ where impl UlpsEq for Rotation where - N: Scalar + Clone + UlpsEq, + N: Scalar + UlpsEq, DefaultAllocator: Allocator, N::Epsilon: Copy, { diff --git a/src/geometry/rotation_construction.rs b/src/geometry/rotation_construction.rs index 514ed439..a7779cc6 100644 --- a/src/geometry/rotation_construction.rs +++ b/src/geometry/rotation_construction.rs @@ -10,7 +10,7 @@ use crate::geometry::Rotation; impl Rotation where - N: Scalar + Clone + Zero + One, + N: Scalar + Zero + One, DefaultAllocator: Allocator, { /// Creates a new square identity rotation of the given `dimension`. 
@@ -32,7 +32,7 @@ where impl One for Rotation where - N: Scalar + Clone + Zero + One + ClosedAdd + ClosedMul, + N: Scalar + Zero + One + ClosedAdd + ClosedMul, DefaultAllocator: Allocator, { #[inline] diff --git a/src/geometry/rotation_ops.rs b/src/geometry/rotation_ops.rs index 553d8c62..ed555b6b 100644 --- a/src/geometry/rotation_ops.rs +++ b/src/geometry/rotation_ops.rs @@ -30,7 +30,7 @@ use crate::base::{DefaultAllocator, Matrix, MatrixMN, Scalar, Unit, Vector, Vect use crate::geometry::{Point, Rotation}; -impl Index<(usize, usize)> for Rotation +impl Index<(usize, usize)> for Rotation where DefaultAllocator: Allocator { type Output = N; diff --git a/src/geometry/swizzle.rs b/src/geometry/swizzle.rs index 149bcf02..9ec6b2e5 100644 --- a/src/geometry/swizzle.rs +++ b/src/geometry/swizzle.rs @@ -6,7 +6,7 @@ use typenum::{self, Cmp, Greater}; macro_rules! impl_swizzle { ($( where $BaseDim: ident: $( $name: ident() -> $Result: ident[$($i: expr),+] ),+ ;)* ) => { $( - impl Point + impl Point where DefaultAllocator: Allocator, D::Value: Cmp diff --git a/src/geometry/translation.rs b/src/geometry/translation.rs index 1540da09..79c1ad8b 100755 --- a/src/geometry/translation.rs +++ b/src/geometry/translation.rs @@ -23,7 +23,7 @@ use crate::geometry::Point; /// A translation. 
#[repr(C)] #[derive(Debug)] -pub struct Translation +pub struct Translation where DefaultAllocator: Allocator { /// The translation coordinates, i.e., how much is added to a point's coordinates when it is @@ -31,7 +31,7 @@ where DefaultAllocator: Allocator pub vector: VectorN, } -impl hash::Hash for Translation +impl hash::Hash for Translation where DefaultAllocator: Allocator, Owned: hash::Hash, @@ -47,7 +47,7 @@ where Owned: Copy, {} -impl Clone for Translation +impl Clone for Translation where DefaultAllocator: Allocator, Owned: Clone, @@ -61,7 +61,7 @@ where #[cfg(feature = "abomonation-serialize")] impl Abomonation for Translation where - N: Scalar + Clone, + N: Scalar, D: DimName, VectorN: Abomonation, DefaultAllocator: Allocator, @@ -80,7 +80,7 @@ where } #[cfg(feature = "serde-serialize")] -impl Serialize for Translation +impl Serialize for Translation where DefaultAllocator: Allocator, Owned: Serialize, @@ -92,7 +92,7 @@ where } #[cfg(feature = "serde-serialize")] -impl<'a, N: Scalar + Clone, D: DimName> Deserialize<'a> for Translation +impl<'a, N: Scalar, D: DimName> Deserialize<'a> for Translation where DefaultAllocator: Allocator, Owned: Deserialize<'a>, @@ -105,7 +105,7 @@ where } } -impl Translation +impl Translation where DefaultAllocator: Allocator { /// Creates a new translation from the given vector. @@ -192,7 +192,7 @@ where DefaultAllocator: Allocator } } -impl Translation +impl Translation where DefaultAllocator: Allocator { /// Translate the given point. @@ -211,7 +211,7 @@ where DefaultAllocator: Allocator } } -impl Translation +impl Translation where DefaultAllocator: Allocator { /// Translate the given point by the inverse of this translation. 
@@ -228,9 +228,9 @@ where DefaultAllocator: Allocator } } -impl Eq for Translation where DefaultAllocator: Allocator {} +impl Eq for Translation where DefaultAllocator: Allocator {} -impl PartialEq for Translation +impl PartialEq for Translation where DefaultAllocator: Allocator { #[inline] @@ -239,7 +239,7 @@ where DefaultAllocator: Allocator } } -impl AbsDiffEq for Translation +impl AbsDiffEq for Translation where DefaultAllocator: Allocator, N::Epsilon: Copy, @@ -257,7 +257,7 @@ where } } -impl RelativeEq for Translation +impl RelativeEq for Translation where DefaultAllocator: Allocator, N::Epsilon: Copy, @@ -280,7 +280,7 @@ where } } -impl UlpsEq for Translation +impl UlpsEq for Translation where DefaultAllocator: Allocator, N::Epsilon: Copy, diff --git a/src/geometry/translation_construction.rs b/src/geometry/translation_construction.rs index 7ac3c5fc..339bdd2a 100644 --- a/src/geometry/translation_construction.rs +++ b/src/geometry/translation_construction.rs @@ -15,7 +15,7 @@ use crate::base::{DefaultAllocator, Scalar, VectorN}; use crate::geometry::Translation; -impl Translation +impl Translation where DefaultAllocator: Allocator { /// Creates a new identity translation. @@ -38,7 +38,7 @@ where DefaultAllocator: Allocator } } -impl One for Translation +impl One for Translation where DefaultAllocator: Allocator { #[inline] @@ -47,7 +47,7 @@ where DefaultAllocator: Allocator } } -impl Distribution> for Standard +impl Distribution> for Standard where DefaultAllocator: Allocator, Standard: Distribution, @@ -59,7 +59,7 @@ where } #[cfg(feature = "arbitrary")] -impl Arbitrary for Translation +impl Arbitrary for Translation where DefaultAllocator: Allocator, Owned: Send, @@ -78,7 +78,7 @@ where */ macro_rules! 
componentwise_constructors_impl( ($($doc: expr; $D: ty, $($args: ident:$irow: expr),*);* $(;)*) => {$( - impl Translation + impl Translation where DefaultAllocator: Allocator { #[doc = "Initializes this translation from its components."] #[doc = "# Example\n```"] diff --git a/src/geometry/translation_conversion.rs b/src/geometry/translation_conversion.rs index ee2f0e6d..b44412e6 100644 --- a/src/geometry/translation_conversion.rs +++ b/src/geometry/translation_conversion.rs @@ -22,8 +22,8 @@ use crate::geometry::{Isometry, Point, Similarity, SuperTCategoryOf, TAffine, Tr impl SubsetOf> for Translation where - N1: Scalar + Clone, - N2: Scalar + Clone + SupersetOf, + N1: Scalar, + N2: Scalar + SupersetOf, DefaultAllocator: Allocator + Allocator, { #[inline] @@ -153,7 +153,7 @@ where } } -impl From> for MatrixN> +impl From> for MatrixN> where D: DimNameAdd, DefaultAllocator: Allocator + Allocator, DimNameSum>, @@ -164,7 +164,7 @@ where } } -impl From> for Translation +impl From> for Translation where DefaultAllocator: Allocator { #[inline] diff --git a/src/geometry/translation_coordinates.rs b/src/geometry/translation_coordinates.rs index 10e5926f..c422415c 100644 --- a/src/geometry/translation_coordinates.rs +++ b/src/geometry/translation_coordinates.rs @@ -16,7 +16,7 @@ use crate::geometry::Translation; macro_rules! deref_impl( ($D: ty, $Target: ident $(, $comps: ident)*) => { - impl Deref for Translation + impl Deref for Translation where DefaultAllocator: Allocator { type Target = $Target; @@ -26,7 +26,7 @@ macro_rules! deref_impl( } } - impl DerefMut for Translation + impl DerefMut for Translation where DefaultAllocator: Allocator { #[inline] fn deref_mut(&mut self) -> &mut Self::Target { diff --git a/src/linalg/lu.rs b/src/linalg/lu.rs index 42cea3ad..521df894 100644 --- a/src/linalg/lu.rs +++ b/src/linalg/lu.rs @@ -318,7 +318,7 @@ where DefaultAllocator: Allocator + Allocator<(usize, usize), D> /// element `matrix[(i, i)]` is provided as argument. 
pub fn gauss_step(matrix: &mut Matrix, diag: N, i: usize) where - N: Scalar + Clone + Field, + N: Scalar + Field, S: StorageMut, { let mut submat = matrix.slice_range_mut(i.., i..); @@ -346,7 +346,7 @@ pub fn gauss_step_swap( i: usize, piv: usize, ) where - N: Scalar + Clone + Field, + N: Scalar + Field, S: StorageMut, { let piv = piv - i; diff --git a/src/linalg/permutation_sequence.rs b/src/linalg/permutation_sequence.rs index a7d2e8aa..ce493905 100644 --- a/src/linalg/permutation_sequence.rs +++ b/src/linalg/permutation_sequence.rs @@ -92,7 +92,7 @@ where DefaultAllocator: Allocator<(usize, usize), D> /// Applies this sequence of permutations to the rows of `rhs`. #[inline] - pub fn permute_rows(&self, rhs: &mut Matrix) + pub fn permute_rows(&self, rhs: &mut Matrix) where S2: StorageMut { for i in self.ipiv.rows_range(..self.len).iter() { rhs.swap_rows(i.0, i.1) @@ -101,7 +101,7 @@ where DefaultAllocator: Allocator<(usize, usize), D> /// Applies this sequence of permutations in reverse to the rows of `rhs`. #[inline] - pub fn inv_permute_rows( + pub fn inv_permute_rows( &self, rhs: &mut Matrix, ) where @@ -115,7 +115,7 @@ where DefaultAllocator: Allocator<(usize, usize), D> /// Applies this sequence of permutations to the columns of `rhs`. #[inline] - pub fn permute_columns( + pub fn permute_columns( &self, rhs: &mut Matrix, ) where @@ -128,7 +128,7 @@ where DefaultAllocator: Allocator<(usize, usize), D> /// Applies this sequence of permutations in reverse to the columns of `rhs`. #[inline] - pub fn inv_permute_columns( + pub fn inv_permute_columns( &self, rhs: &mut Matrix, ) where diff --git a/src/sparse/cs_matrix.rs b/src/sparse/cs_matrix.rs index 9cc6b51b..9fc91af0 100644 --- a/src/sparse/cs_matrix.rs +++ b/src/sparse/cs_matrix.rs @@ -105,7 +105,7 @@ pub trait CsStorageMut: /// A storage of column-compressed sparse matrix based on a Vec. 
#[derive(Clone, Debug, PartialEq)] -pub struct CsVecStorage +pub struct CsVecStorage where DefaultAllocator: Allocator { pub(crate) shape: (R, C), @@ -114,7 +114,7 @@ where DefaultAllocator: Allocator pub(crate) vals: Vec, } -impl CsVecStorage +impl CsVecStorage where DefaultAllocator: Allocator { /// The value buffer of this storage. @@ -133,9 +133,9 @@ where DefaultAllocator: Allocator } } -impl CsVecStorage where DefaultAllocator: Allocator {} +impl CsVecStorage where DefaultAllocator: Allocator {} -impl<'a, N: Scalar + Clone, R: Dim, C: Dim> CsStorageIter<'a, N, R, C> for CsVecStorage +impl<'a, N: Scalar, R: Dim, C: Dim> CsStorageIter<'a, N, R, C> for CsVecStorage where DefaultAllocator: Allocator { type ColumnEntries = ColumnEntries<'a, N>; @@ -154,7 +154,7 @@ where DefaultAllocator: Allocator } } -impl CsStorage for CsVecStorage +impl CsStorage for CsVecStorage where DefaultAllocator: Allocator { #[inline] @@ -199,7 +199,7 @@ where DefaultAllocator: Allocator } } -impl<'a, N: Scalar + Clone, R: Dim, C: Dim> CsStorageIterMut<'a, N, R, C> for CsVecStorage +impl<'a, N: Scalar, R: Dim, C: Dim> CsStorageIterMut<'a, N, R, C> for CsVecStorage where DefaultAllocator: Allocator { type ValuesMut = slice::IterMut<'a, N>; @@ -220,11 +220,11 @@ where DefaultAllocator: Allocator } } -impl CsStorageMut for CsVecStorage where DefaultAllocator: Allocator +impl CsStorageMut for CsVecStorage where DefaultAllocator: Allocator {} /* -pub struct CsSliceStorage<'a, N: Scalar + Clone, R: Dim, C: DimAdd> { +pub struct CsSliceStorage<'a, N: Scalar, R: Dim, C: DimAdd> { shape: (R, C), p: VectorSlice>, i: VectorSlice, @@ -234,7 +234,7 @@ pub struct CsSliceStorage<'a, N: Scalar + Clone, R: Dim, C: DimAdd> { /// A compressed sparse column matrix. #[derive(Clone, Debug, PartialEq)] pub struct CsMatrix< - N: Scalar + Clone, + N: Scalar, R: Dim = Dynamic, C: Dim = Dynamic, S: CsStorage = CsVecStorage, @@ -246,7 +246,7 @@ pub struct CsMatrix< /// A column compressed sparse vector. 
pub type CsVector> = CsMatrix; -impl CsMatrix +impl CsMatrix where DefaultAllocator: Allocator { /// Creates a new compressed sparse column matrix with the specified dimension and @@ -323,7 +323,7 @@ where DefaultAllocator: Allocator } /* -impl CsMatrix { +impl CsMatrix { pub(crate) fn from_parts( nrows: usize, ncols: usize, @@ -340,7 +340,7 @@ impl CsMatrix { } */ -impl> CsMatrix { +impl> CsMatrix { pub(crate) fn from_data(data: S) -> Self { CsMatrix { data, @@ -433,7 +433,7 @@ impl> CsMatrix> CsMatrix { +impl> CsMatrix { /// Iterator through all the mutable values of this sparse matrix. #[inline] pub fn values_mut(&mut self) -> impl Iterator { @@ -441,7 +441,7 @@ impl> CsMatrix CsMatrix +impl CsMatrix where DefaultAllocator: Allocator { pub(crate) fn sort(&mut self) diff --git a/src/sparse/cs_matrix_conversion.rs b/src/sparse/cs_matrix_conversion.rs index 0844f22c..abf195e2 100644 --- a/src/sparse/cs_matrix_conversion.rs +++ b/src/sparse/cs_matrix_conversion.rs @@ -7,7 +7,7 @@ use crate::sparse::{CsMatrix, CsStorage}; use crate::storage::Storage; use crate::{DefaultAllocator, Dim, Dynamic, Matrix, MatrixMN, Scalar}; -impl<'a, N: Scalar + Clone + Zero + ClosedAdd> CsMatrix { +impl<'a, N: Scalar + Zero + ClosedAdd> CsMatrix { /// Creates a column-compressed sparse matrix from a sparse matrix in triplet form. pub fn from_triplet( nrows: usize, @@ -21,7 +21,7 @@ impl<'a, N: Scalar + Clone + Zero + ClosedAdd> CsMatrix { } } -impl<'a, N: Scalar + Clone + Zero + ClosedAdd, R: Dim, C: Dim> CsMatrix +impl<'a, N: Scalar + Zero + ClosedAdd, R: Dim, C: Dim> CsMatrix where DefaultAllocator: Allocator + Allocator { /// Creates a column-compressed sparse matrix from a sparse matrix in triplet form. 
@@ -66,7 +66,7 @@ where DefaultAllocator: Allocator + Allocator } } -impl<'a, N: Scalar + Clone + Zero, R: Dim, C: Dim, S> From> for MatrixMN +impl<'a, N: Scalar + Zero, R: Dim, C: Dim, S> From> for MatrixMN where S: CsStorage, DefaultAllocator: Allocator, @@ -85,7 +85,7 @@ where } } -impl<'a, N: Scalar + Clone + Zero, R: Dim, C: Dim, S> From> for CsMatrix +impl<'a, N: Scalar + Zero, R: Dim, C: Dim, S> From> for CsMatrix where S: Storage, DefaultAllocator: Allocator + Allocator, diff --git a/src/sparse/cs_matrix_ops.rs b/src/sparse/cs_matrix_ops.rs index e602fec6..8a4c063a 100644 --- a/src/sparse/cs_matrix_ops.rs +++ b/src/sparse/cs_matrix_ops.rs @@ -8,7 +8,7 @@ use crate::sparse::{CsMatrix, CsStorage, CsStorageMut, CsVector}; use crate::storage::StorageMut; use crate::{DefaultAllocator, Dim, Scalar, Vector, VectorN, U1}; -impl> CsMatrix { +impl> CsMatrix { fn scatter( &self, j: usize, @@ -39,7 +39,7 @@ impl> CsMatrix CsVector { +impl CsVector { pub fn axpy(&mut self, alpha: N, x: CsVector, beta: N) { // First, compute the number of non-zero entries. let mut nnzero = 0; @@ -76,7 +76,7 @@ impl CsVector { } */ -impl> Vector { +impl> Vector { /// Perform a sparse axpy operation: `self = alpha * x + beta * self` operation. 
pub fn axpy_cs(&mut self, alpha: N, x: &CsVector, beta: N) where @@ -126,7 +126,7 @@ impl Mul<&'b CsMatrix> for &'a CsMatrix where - N: Scalar + Clone + ClosedAdd + ClosedMul + Zero, + N: Scalar + ClosedAdd + ClosedMul + Zero, R1: Dim, C1: Dim, R2: Dim, @@ -219,7 +219,7 @@ where impl<'a, 'b, N, R1, R2, C1, C2, S1, S2> Add<&'b CsMatrix> for &'a CsMatrix where - N: Scalar + Clone + ClosedAdd + ClosedMul + One, + N: Scalar + ClosedAdd + ClosedMul + One, R1: Dim, C1: Dim, R2: Dim, @@ -287,7 +287,7 @@ where impl<'a, 'b, N, R, C, S> Mul for CsMatrix where - N: Scalar + Clone + ClosedAdd + ClosedMul + Zero, + N: Scalar + ClosedAdd + ClosedMul + Zero, R: Dim, C: Dim, S: CsStorageMut, From bd7dd6e3456da68db169a2acfe70e9d5d65f5cf6 Mon Sep 17 00:00:00 2001 From: Avi Weinstock Date: Thu, 21 Nov 2019 17:15:18 -0500 Subject: [PATCH 44/67] Add Scalar + Copy bounds in nalgebra-lapack. ```bash export RELEVANT_SOURCEFILES="$(find nalgebra-lapack -name '*.rs')" for f in $RELEVANT_SOURCEFILES; do sed -i 's/N\([0-9]\?\): *Scalar,/N\1: Scalar + Copy,/' $f; done for f in $RELEVANT_SOURCEFILES; do sed -i 's/N\([0-9]\?\): *Scalar>/N\1: Scalar + Copy>/' $f; done for f in $RELEVANT_SOURCEFILES; do sed -i 's/\([A-Z]*Scalar\): Scalar {/\1: Scalar + Copy {/' $f; done for f in $RELEVANT_SOURCEFILES; do sed -i 's/SVDScalar, C: Dim>: Scalar/SVDScalar, C: Dim>: Scalar + Copy/' $f; done ``` --- nalgebra-lapack/src/cholesky.rs | 6 +++--- nalgebra-lapack/src/eigen.rs | 6 +++--- nalgebra-lapack/src/hessenberg.rs | 6 +++--- nalgebra-lapack/src/lu.rs | 6 +++--- nalgebra-lapack/src/qr.rs | 6 +++--- nalgebra-lapack/src/schur.rs | 6 +++--- nalgebra-lapack/src/svd.rs | 6 +++--- nalgebra-lapack/src/symmetric_eigen.rs | 6 +++--- 8 files changed, 24 insertions(+), 24 deletions(-) diff --git a/nalgebra-lapack/src/cholesky.rs b/nalgebra-lapack/src/cholesky.rs index 02552915..2014da76 100644 --- a/nalgebra-lapack/src/cholesky.rs +++ b/nalgebra-lapack/src/cholesky.rs @@ -28,13 +28,13 @@ use lapack; )) )] 
#[derive(Clone, Debug)] -pub struct Cholesky +pub struct Cholesky where DefaultAllocator: Allocator { l: MatrixN, } -impl Copy for Cholesky +impl Copy for Cholesky where DefaultAllocator: Allocator, MatrixN: Copy, @@ -175,7 +175,7 @@ where DefaultAllocator: Allocator */ /// Trait implemented by floats (`f32`, `f64`) and complex floats (`Complex`, `Complex`) /// supported by the cholesky decomposition. -pub trait CholeskyScalar: Scalar { +pub trait CholeskyScalar: Scalar + Copy { #[allow(missing_docs)] fn xpotrf(uplo: u8, n: i32, a: &mut [Self], lda: i32, info: &mut i32); #[allow(missing_docs)] diff --git a/nalgebra-lapack/src/eigen.rs b/nalgebra-lapack/src/eigen.rs index d5397841..0db48fc1 100644 --- a/nalgebra-lapack/src/eigen.rs +++ b/nalgebra-lapack/src/eigen.rs @@ -33,7 +33,7 @@ use lapack; )) )] #[derive(Clone, Debug)] -pub struct Eigen +pub struct Eigen where DefaultAllocator: Allocator + Allocator { /// The eigenvalues of the decomposed matrix. @@ -44,7 +44,7 @@ where DefaultAllocator: Allocator + Allocator pub left_eigenvectors: Option>, } -impl Copy for Eigen +impl Copy for Eigen where DefaultAllocator: Allocator + Allocator, VectorN: Copy, @@ -311,7 +311,7 @@ where DefaultAllocator: Allocator + Allocator */ /// Trait implemented by scalar type for which Lapack function exist to compute the /// eigendecomposition. 
-pub trait EigenScalar: Scalar { +pub trait EigenScalar: Scalar + Copy { #[allow(missing_docs)] fn xgeev( jobvl: u8, diff --git a/nalgebra-lapack/src/hessenberg.rs b/nalgebra-lapack/src/hessenberg.rs index c9f8d282..8048bfcc 100644 --- a/nalgebra-lapack/src/hessenberg.rs +++ b/nalgebra-lapack/src/hessenberg.rs @@ -30,14 +30,14 @@ use lapack; )) )] #[derive(Clone, Debug)] -pub struct Hessenberg> +pub struct Hessenberg> where DefaultAllocator: Allocator + Allocator> { h: MatrixN, tau: VectorN>, } -impl> Copy for Hessenberg +impl> Copy for Hessenberg where DefaultAllocator: Allocator + Allocator>, MatrixN: Copy, @@ -137,7 +137,7 @@ where DefaultAllocator: Allocator + Allocator> * Lapack functions dispatch. * */ -pub trait HessenbergScalar: Scalar { +pub trait HessenbergScalar: Scalar + Copy { fn xgehrd( n: i32, ilo: i32, diff --git a/nalgebra-lapack/src/lu.rs b/nalgebra-lapack/src/lu.rs index ada9bb34..b087aea7 100644 --- a/nalgebra-lapack/src/lu.rs +++ b/nalgebra-lapack/src/lu.rs @@ -37,14 +37,14 @@ use lapack; )) )] #[derive(Clone, Debug)] -pub struct LU, C: Dim> +pub struct LU, C: Dim> where DefaultAllocator: Allocator> + Allocator { lu: MatrixMN, p: VectorN>, } -impl, C: Dim> Copy for LU +impl, C: Dim> Copy for LU where DefaultAllocator: Allocator + Allocator>, MatrixMN: Copy, @@ -306,7 +306,7 @@ where * */ /// Trait implemented by scalars for which Lapack implements the LU decomposition. 
-pub trait LUScalar: Scalar { +pub trait LUScalar: Scalar + Copy { #[allow(missing_docs)] fn xgetrf(m: i32, n: i32, a: &mut [Self], lda: i32, ipiv: &mut [i32], info: &mut i32); #[allow(missing_docs)] diff --git a/nalgebra-lapack/src/qr.rs b/nalgebra-lapack/src/qr.rs index 5d4b3108..58a040d5 100644 --- a/nalgebra-lapack/src/qr.rs +++ b/nalgebra-lapack/src/qr.rs @@ -33,14 +33,14 @@ use lapack; )) )] #[derive(Clone, Debug)] -pub struct QR, C: Dim> +pub struct QR, C: Dim> where DefaultAllocator: Allocator + Allocator> { qr: MatrixMN, tau: VectorN>, } -impl, C: Dim> Copy for QR +impl, C: Dim> Copy for QR where DefaultAllocator: Allocator + Allocator>, MatrixMN: Copy, @@ -166,7 +166,7 @@ where DefaultAllocator: Allocator */ /// Trait implemented by scalar types for which Lapack function exist to compute the /// QR decomposition. -pub trait QRScalar: Scalar { +pub trait QRScalar: Scalar + Copy { fn xgeqrf( m: i32, n: i32, diff --git a/nalgebra-lapack/src/schur.rs b/nalgebra-lapack/src/schur.rs index eb618fe9..e61a22ea 100644 --- a/nalgebra-lapack/src/schur.rs +++ b/nalgebra-lapack/src/schur.rs @@ -33,7 +33,7 @@ use lapack; )) )] #[derive(Clone, Debug)] -pub struct Schur +pub struct Schur where DefaultAllocator: Allocator + Allocator { re: VectorN, @@ -42,7 +42,7 @@ where DefaultAllocator: Allocator + Allocator q: MatrixN, } -impl Copy for Schur +impl Copy for Schur where DefaultAllocator: Allocator + Allocator, MatrixN: Copy, @@ -162,7 +162,7 @@ where DefaultAllocator: Allocator + Allocator * */ /// Trait implemented by scalars for which Lapack implements the RealField Schur decomposition. 
-pub trait SchurScalar: Scalar { +pub trait SchurScalar: Scalar + Copy { #[allow(missing_docs)] fn xgees( jobvs: u8, diff --git a/nalgebra-lapack/src/svd.rs b/nalgebra-lapack/src/svd.rs index 9363fced..1cd0d24b 100644 --- a/nalgebra-lapack/src/svd.rs +++ b/nalgebra-lapack/src/svd.rs @@ -36,7 +36,7 @@ use lapack; )) )] #[derive(Clone, Debug)] -pub struct SVD, C: Dim> +pub struct SVD, C: Dim> where DefaultAllocator: Allocator + Allocator> + Allocator { /// The left-singular vectors `U` of this SVD. @@ -47,7 +47,7 @@ where DefaultAllocator: Allocator + Allocator> + Al pub singular_values: VectorN>, } -impl, C: Dim> Copy for SVD +impl, C: Dim> Copy for SVD where DefaultAllocator: Allocator + Allocator + Allocator>, MatrixMN: Copy, @@ -57,7 +57,7 @@ where /// Trait implemented by floats (`f32`, `f64`) and complex floats (`Complex`, `Complex`) /// supported by the Singular Value Decompotition. -pub trait SVDScalar, C: Dim>: Scalar +pub trait SVDScalar, C: Dim>: Scalar + Copy where DefaultAllocator: Allocator + Allocator + Allocator> diff --git a/nalgebra-lapack/src/symmetric_eigen.rs b/nalgebra-lapack/src/symmetric_eigen.rs index d34b3fce..d50ee805 100644 --- a/nalgebra-lapack/src/symmetric_eigen.rs +++ b/nalgebra-lapack/src/symmetric_eigen.rs @@ -35,7 +35,7 @@ use lapack; )) )] #[derive(Clone, Debug)] -pub struct SymmetricEigen +pub struct SymmetricEigen where DefaultAllocator: Allocator + Allocator { /// The eigenvectors of the decomposed matrix. @@ -45,7 +45,7 @@ where DefaultAllocator: Allocator + Allocator pub eigenvalues: VectorN, } -impl Copy for SymmetricEigen +impl Copy for SymmetricEigen where DefaultAllocator: Allocator + Allocator, MatrixN: Copy, @@ -169,7 +169,7 @@ where DefaultAllocator: Allocator + Allocator */ /// Trait implemented by scalars for which Lapack implements the eigendecomposition of symmetric /// real matrices. 
-pub trait SymmetricEigenScalar: Scalar { +pub trait SymmetricEigenScalar: Scalar + Copy { #[allow(missing_docs)] fn xsyev( jobz: u8, From 8bf94f7afb9709438212f190d3411662d601597a Mon Sep 17 00:00:00 2001 From: sebcrozet Date: Tue, 24 Dec 2019 17:37:27 +0100 Subject: [PATCH 45/67] Add matrix/slice conversions. Fix #687. --- src/base/conversion.rs | 31 ++++++++++++++++++++++++++++++- 1 file changed, 30 insertions(+), 1 deletion(-) diff --git a/src/base/conversion.rs b/src/base/conversion.rs index 883f3fb8..414a3406 100644 --- a/src/base/conversion.rs +++ b/src/base/conversion.rs @@ -21,7 +21,7 @@ use crate::base::storage::{ContiguousStorage, ContiguousStorageMut, Storage, Sto #[cfg(any(feature = "std", feature = "alloc"))] use crate::base::VecStorage; use crate::base::{SliceStorage, SliceStorageMut}; -use crate::base::{DefaultAllocator, Matrix, ArrayStorage, MatrixMN, MatrixSlice, MatrixSliceMut, Scalar}; +use crate::base::{DefaultAllocator, Matrix, ArrayStorage, MatrixMN, MatrixSlice, MatrixSliceMut, Scalar, DVectorSlice, DVectorSliceMut}; use crate::constraint::DimEq; // FIXME: too bad this won't work allo slice conversions. 
@@ -524,4 +524,33 @@ for MatrixSliceMut<'a, N, RSlice, CSlice, RStride, CStride> Matrix::from_data_statically_unchecked(data) } } +} + +impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: ContiguousStorage> Into<&'a [N]> for &'a Matrix { + #[inline] + fn into(self) -> &'a [N] { + self.as_slice() + } +} + +impl<'a, N: Scalar + Copy, R: Dim, C: Dim, S: ContiguousStorageMut> Into<&'a mut [N]> for &'a mut Matrix { + #[inline] + fn into(self) -> &'a mut [N] { + self.as_mut_slice() + } +} + + +impl<'a, N: Scalar + Copy> From<&'a [N]> for DVectorSlice<'a, N> { + #[inline] + fn from(slice: &'a [N]) -> Self { + Self::from_slice(slice, slice.len()) + } +} + +impl<'a, N: Scalar + Copy> From<&'a mut [N]> for DVectorSliceMut<'a, N> { + #[inline] + fn from(slice: &'a mut [N]) -> Self { + Self::from_slice(slice, slice.len()) + } } \ No newline at end of file From 43747b4f598eeed44d9ee7e9787a9d451ab8681d Mon Sep 17 00:00:00 2001 From: Mara Bos Date: Fri, 29 Nov 2019 15:31:52 +0100 Subject: [PATCH 46/67] Implement Extend<&N> for VecStorage. Extend was already implemented, but nalgebra vectors/matrices give iterators that give &N, not N, so implementing Extend<&N> as well makes it easier to use. It seems common practice to do so: The standard library's Vec also implments Extend for both T and &T. --- src/base/vec_storage.rs | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/src/base/vec_storage.rs b/src/base/vec_storage.rs index a0230488..747e5179 100644 --- a/src/base/vec_storage.rs +++ b/src/base/vec_storage.rs @@ -268,6 +268,21 @@ impl Extend for VecStorage } } +impl<'a, N: 'a + Copy, R: Dim> Extend<&'a N> for VecStorage +{ + /// Extends the number of columns of the `VecStorage` with elements + /// from the given iterator. + /// + /// # Panics + /// This function panics if the number of elements yielded by the + /// given iterator is not a multiple of the number of rows of the + /// `VecStorage`. 
+ fn extend>(&mut self, iter: I) + { + self.extend(iter.into_iter().copied()) + } +} + impl Extend> for VecStorage where N: Scalar + Copy, From 35eafa03371e126bff28f035a7bba61af08732cb Mon Sep 17 00:00:00 2001 From: "S.Brandeis" Date: Sun, 19 Jan 2020 02:39:17 +0100 Subject: [PATCH 47/67] Add unit test for more general PartialEq trait impl. for Matrix type --- tests/core/matrix.rs | 27 ++++++++++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git a/tests/core/matrix.rs b/tests/core/matrix.rs index e4fb4d0c..163049ab 100644 --- a/tests/core/matrix.rs +++ b/tests/core/matrix.rs @@ -1,7 +1,7 @@ use num::{One, Zero}; use std::cmp::Ordering; -use na::dimension::{U15, U8}; +use na::dimension::{U15, U8, U2}; use na::{ self, DMatrix, DVector, Matrix2, Matrix2x3, Matrix2x4, Matrix3, Matrix3x2, Matrix3x4, Matrix4, Matrix4x3, Matrix4x5, Matrix5, Matrix6, MatrixMN, RowVector3, RowVector4, RowVector5, @@ -1047,3 +1047,28 @@ mod finite_dim_inner_space_tests { true } } + +#[test] +fn partial_eq() { + let dynamic_mat = DMatrix::from_row_slice(2, 4, &[1, 2, 3, 4, 5, 6, 7, 8]); + let static_mat = Matrix2x4::new(1, 2, 3, 4, 5, 6, 7, 8); + + let dyn_static_slice = dynamic_mat.fixed_slice::(0, 0); + let dyn_dyn_slice = dynamic_mat.slice((0, 0), (2, 2)); + let static_static_slice = static_mat.fixed_slice::(0, 0); + let static_dyn_slice = static_mat.slice((0, 0), (2, 2)); + + let larger_slice = static_mat.slice((0, 0), (2, 3)); + + assert_eq!(dynamic_mat, static_mat); + + assert_eq!(dyn_static_slice, dyn_dyn_slice); + assert_eq!(dyn_static_slice, static_static_slice); + assert_eq!(dyn_static_slice, static_dyn_slice); + assert_eq!(dyn_dyn_slice, static_static_slice); + assert_eq!(dyn_dyn_slice, static_dyn_slice); + assert_eq!(static_static_slice, static_dyn_slice); + + assert_ne!(dynamic_mat, static_dyn_slice); + assert_ne!(static_dyn_slice, larger_slice); +} From 215df7948e1e9373e90b4ebbf998ce2bd70afd65 Mon Sep 17 00:00:00 2001 From: "S.Brandeis" Date: Sun, 19 
Jan 2020 03:01:31 +0100 Subject: [PATCH 48/67] Modify PartialEq test to take into account typenum UInt dimensions --- tests/core/matrix.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tests/core/matrix.rs b/tests/core/matrix.rs index 163049ab..74fc6153 100644 --- a/tests/core/matrix.rs +++ b/tests/core/matrix.rs @@ -7,6 +7,9 @@ use na::{ Matrix4x3, Matrix4x5, Matrix5, Matrix6, MatrixMN, RowVector3, RowVector4, RowVector5, Vector1, Vector2, Vector3, Vector4, Vector5, Vector6, }; +use typenum::{UInt, UTerm}; +use serde_json::error::Category::Data; +use typenum::bit::{B0, B1}; #[test] fn iter() { @@ -1053,6 +1056,9 @@ fn partial_eq() { let dynamic_mat = DMatrix::from_row_slice(2, 4, &[1, 2, 3, 4, 5, 6, 7, 8]); let static_mat = Matrix2x4::new(1, 2, 3, 4, 5, 6, 7, 8); + type TypeNumInt = typenum::UInt, B1>; + let typenum_static_mat = MatrixMN::::new(1, 2, 3, 4, 5, 6, 7, 8); + let dyn_static_slice = dynamic_mat.fixed_slice::(0, 0); let dyn_dyn_slice = dynamic_mat.slice((0, 0), (2, 2)); let static_static_slice = static_mat.fixed_slice::(0, 0); @@ -1061,6 +1067,8 @@ fn partial_eq() { let larger_slice = static_mat.slice((0, 0), (2, 3)); assert_eq!(dynamic_mat, static_mat); + assert_eq!(dynamic_mat, typenum_static_mat); + assert_eq!(typenum_static_mat, static_mat); assert_eq!(dyn_static_slice, dyn_dyn_slice); assert_eq!(dyn_static_slice, static_static_slice); From 39a433c0e798da14e957a4196ac0b342ad1b6ae8 Mon Sep 17 00:00:00 2001 From: "S.Brandeis" Date: Sun, 19 Jan 2020 03:02:05 +0100 Subject: [PATCH 49/67] Broader PartialEq implementation for types implementing Dim trait --- src/base/dimension.rs | 45 +++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 43 insertions(+), 2 deletions(-) diff --git a/src/base/dimension.rs b/src/base/dimension.rs index 5d1d1bd9..cabd8a1c 100644 --- a/src/base/dimension.rs +++ b/src/base/dimension.rs @@ -14,7 +14,7 @@ use typenum::{ use serde::{Deserialize, Deserializer, Serialize, Serializer}; /// Dim of dynamically-sized 
algebraic entities. -#[derive(Clone, Copy, Eq, PartialEq, Debug)] +#[derive(Clone, Copy, Eq, Debug)] pub struct Dynamic { value: usize, } @@ -107,6 +107,12 @@ impl Sub for Dynamic { } } +impl PartialEq for Dynamic { + fn eq(&self, other: &T) -> bool { + self.value() == other.value() + } +} + /* * * Operations. @@ -244,7 +250,7 @@ impl NamedDim for typenum::U1 { macro_rules! named_dimension( ($($D: ident),* $(,)*) => {$( /// A type level dimension. - #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] + #[derive(Debug, Copy, Clone, Hash, Eq)] #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] pub struct $D; @@ -280,6 +286,12 @@ macro_rules! named_dimension( } impl IsNotStaticOne for $D { } + + impl PartialEq for $D { + fn eq(&self, other: &T) -> bool { + self.value() == other.value() + } + } )*} ); @@ -367,6 +379,23 @@ impl< { } +impl< + T: Dim, + A: Bit + Any + Debug + Copy + PartialEq + Send + Sync, + B: Bit + Any + Debug + Copy + PartialEq + Send + Sync, + C: Bit + Any + Debug + Copy + PartialEq + Send + Sync, + D: Bit + Any + Debug + Copy + PartialEq + Send + Sync, + E: Bit + Any + Debug + Copy + PartialEq + Send + Sync, + F: Bit + Any + Debug + Copy + PartialEq + Send + Sync, + G: Bit + Any + Debug + Copy + PartialEq + Send + Sync, + > PartialEq + for UInt, A>, B>, C>, D>, E>, F>, G> +{ + fn eq(&self, other: &T) -> bool { + self.value() == other.value() + } +} + impl NamedDim for UInt { @@ -408,3 +437,15 @@ impl { } + +impl< + T: Dim, + U: Unsigned + DimName, + B: Bit + Any + Debug + Copy + PartialEq + Send + Sync + > PartialEq + for UInt +{ + fn eq(&self, other: &T) -> bool { + self.value() == other.value() + } +} From 2b8410e08bc0432868cacae8e9c4655edbb7c893 Mon Sep 17 00:00:00 2001 From: "S.Brandeis" Date: Sun, 19 Jan 2020 03:06:00 +0100 Subject: [PATCH 50/67] Fix bug - PartialEq for Matrix no longer panics when shapes do not match --- src/base/matrix.rs | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git 
a/src/base/matrix.rs b/src/base/matrix.rs index 95dda818..449d76cd 100644 --- a/src/base/matrix.rs +++ b/src/base/matrix.rs @@ -1350,12 +1350,11 @@ where S: Storage, { #[inline] - fn eq(&self, right: &Matrix) -> bool { - assert!( - self.shape() == right.shape(), - "Matrix equality test dimension mismatch." - ); - self.iter().zip(right.iter()).all(|(l, r)| l == r) + fn eq(&self, right: &Matrix) -> bool { + if self.shape() == right.shape() { + return self.iter().zip(right.iter()).all(|(l, r)| l == r) + } + false } } From 50ea55e877c50c5ea126fa265fee29492f437b88 Mon Sep 17 00:00:00 2001 From: "S.Brandeis" Date: Sun, 19 Jan 2020 03:06:36 +0100 Subject: [PATCH 51/67] Modify PartialEq for Matrix to allow comparison with all types of Matrix --- src/base/matrix.rs | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/src/base/matrix.rs b/src/base/matrix.rs index 449d76cd..5d60b970 100644 --- a/src/base/matrix.rs +++ b/src/base/matrix.rs @@ -1344,10 +1344,15 @@ where S: Storage, {} -impl PartialEq for Matrix +impl PartialEq> for Matrix where - N: Scalar, + N: Scalar + PartialEq, + C: Dim + PartialEq, + C2: Dim, + R: Dim + PartialEq, + R2: Dim, S: Storage, + S2: Storage { #[inline] fn eq(&self, right: &Matrix) -> bool { From 12c2efdb668f534ef3899d3b089b7982868b81ed Mon Sep 17 00:00:00 2001 From: "S.Brandeis" Date: Sun, 19 Jan 2020 16:21:32 +0100 Subject: [PATCH 52/67] Separate test for shape mismatch and for PartialEq on different types --- tests/core/matrix.rs | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/tests/core/matrix.rs b/tests/core/matrix.rs index 74fc6153..2dbac323 100644 --- a/tests/core/matrix.rs +++ b/tests/core/matrix.rs @@ -1,7 +1,7 @@ use num::{One, Zero}; use std::cmp::Ordering; -use na::dimension::{U15, U8, U2}; +use na::dimension::{U15, U8, U2, U4}; use na::{ self, DMatrix, DVector, Matrix2, Matrix2x3, Matrix2x4, Matrix3, Matrix3x2, Matrix3x4, Matrix4, Matrix4x3, Matrix4x5, Matrix5, Matrix6, MatrixMN, 
RowVector3, RowVector4, RowVector5, @@ -1052,7 +1052,16 @@ mod finite_dim_inner_space_tests { } #[test] -fn partial_eq() { +fn partial_eq_shape_mismatch() { + let a = Matrix2::new(1, 2, 3, 4); + let b = Matrix2x3::new(1, 2, 3, 4, 5, 6); + assert_ne!(a, b); + assert_ne!(b, a); +} + +#[test] +fn partial_eq_different_types() { + // Ensure comparability of several types of Matrices let dynamic_mat = DMatrix::from_row_slice(2, 4, &[1, 2, 3, 4, 5, 6, 7, 8]); let static_mat = Matrix2x4::new(1, 2, 3, 4, 5, 6, 7, 8); From 703ba3e716f8a203fce349a36c4f6b925e004cc3 Mon Sep 17 00:00:00 2001 From: "S.Brandeis" Date: Sun, 19 Jan 2020 16:23:50 +0100 Subject: [PATCH 53/67] Refactor var names and add symmetric assert_eq!()s in test; add TODO --- tests/core/matrix.rs | 56 ++++++++++++++++++++++++++++++-------------- 1 file changed, 38 insertions(+), 18 deletions(-) diff --git a/tests/core/matrix.rs b/tests/core/matrix.rs index 2dbac323..6fad5e8c 100644 --- a/tests/core/matrix.rs +++ b/tests/core/matrix.rs @@ -1065,27 +1065,47 @@ fn partial_eq_different_types() { let dynamic_mat = DMatrix::from_row_slice(2, 4, &[1, 2, 3, 4, 5, 6, 7, 8]); let static_mat = Matrix2x4::new(1, 2, 3, 4, 5, 6, 7, 8); - type TypeNumInt = typenum::UInt, B1>; - let typenum_static_mat = MatrixMN::::new(1, 2, 3, 4, 5, 6, 7, 8); + let mut typenum_static_mat = MatrixMN::::zeros(); + let mut slice = typenum_static_mat.slice_mut((0,0), (2, 4)); + slice += static_mat; - let dyn_static_slice = dynamic_mat.fixed_slice::(0, 0); - let dyn_dyn_slice = dynamic_mat.slice((0, 0), (2, 2)); - let static_static_slice = static_mat.fixed_slice::(0, 0); - let static_dyn_slice = static_mat.slice((0, 0), (2, 2)); + let fslice_of_dmat = dynamic_mat.fixed_slice::(0, 0); + let dslice_of_dmat = dynamic_mat.slice((0, 0), (2, 2)); + let fslice_of_smat = static_mat.fixed_slice::(0, 0); + let dslice_of_smat = static_mat.slice((0, 0), (2, 2)); - let larger_slice = static_mat.slice((0, 0), (2, 3)); - assert_eq!(dynamic_mat, static_mat); - 
assert_eq!(dynamic_mat, typenum_static_mat); - assert_eq!(typenum_static_mat, static_mat); + assert_eq!(static_mat, dynamic_mat); - assert_eq!(dyn_static_slice, dyn_dyn_slice); - assert_eq!(dyn_static_slice, static_static_slice); - assert_eq!(dyn_static_slice, static_dyn_slice); - assert_eq!(dyn_dyn_slice, static_static_slice); - assert_eq!(dyn_dyn_slice, static_dyn_slice); - assert_eq!(static_static_slice, static_dyn_slice); + assert_eq!(dynamic_mat, slice); + assert_eq!(slice, dynamic_mat); + + assert_eq!(static_mat, slice); + assert_eq!(slice, static_mat); + + assert_eq!(fslice_of_dmat, dslice_of_dmat); + assert_eq!(dslice_of_dmat, fslice_of_dmat); + + assert_eq!(fslice_of_dmat, fslice_of_smat); + assert_eq!(fslice_of_smat, fslice_of_dmat); + + assert_eq!(fslice_of_dmat, dslice_of_smat); + assert_eq!(dslice_of_smat, fslice_of_dmat); + + assert_eq!(dslice_of_dmat, fslice_of_smat); + assert_eq!(fslice_of_smat, dslice_of_dmat); + + assert_eq!(dslice_of_dmat, dslice_of_smat); + assert_eq!(dslice_of_smat, dslice_of_dmat); + + assert_eq!(fslice_of_smat, dslice_of_smat); + assert_eq!(dslice_of_smat, fslice_of_smat); + + assert_ne!(dynamic_mat, dslice_of_smat); + assert_ne!(dslice_of_smat, dynamic_mat); + + // TODO - implement those comparisons + // assert_ne!(static_mat, typenum_static_mat); + //assert_ne!(typenum_static_mat, static_mat); - assert_ne!(dynamic_mat, static_dyn_slice); - assert_ne!(static_dyn_slice, larger_slice); } From 8a3f6a12cdfe7afc8f06c8ff571e6836d35149af Mon Sep 17 00:00:00 2001 From: "S.Brandeis" Date: Sun, 19 Jan 2020 16:24:26 +0100 Subject: [PATCH 54/67] Refactor and move named_dimension! macro --- src/base/dimension.rs | 147 +++++++++++++++++------------------------- 1 file changed, 59 insertions(+), 88 deletions(-) diff --git a/src/base/dimension.rs b/src/base/dimension.rs index cabd8a1c..17462d41 100644 --- a/src/base/dimension.rs +++ b/src/base/dimension.rs @@ -247,66 +247,6 @@ impl NamedDim for typenum::U1 { type Name = U1; } -macro_rules! 
named_dimension( - ($($D: ident),* $(,)*) => {$( - /// A type level dimension. - #[derive(Debug, Copy, Clone, Hash, Eq)] - #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] - pub struct $D; - - impl Dim for $D { - #[inline] - fn try_to_usize() -> Option { - Some(typenum::$D::to_usize()) - } - - #[inline] - fn from_usize(dim: usize) -> Self { - assert!(dim == typenum::$D::to_usize(), "Mismatched dimension."); - $D - } - - #[inline] - fn value(&self) -> usize { - typenum::$D::to_usize() - } - } - - impl DimName for $D { - type Value = typenum::$D; - - #[inline] - fn name() -> Self { - $D - } - } - - impl NamedDim for typenum::$D { - type Name = $D; - } - - impl IsNotStaticOne for $D { } - - impl PartialEq for $D { - fn eq(&self, other: &T) -> bool { - self.value() == other.value() - } - } - )*} -); - -// We give explicit names to all Unsigned in [0, 128[ -named_dimension!( - U0, /*U1,*/ U2, U3, U4, U5, U6, U7, U8, U9, U10, U11, U12, U13, U14, U15, U16, U17, U18, - U19, U20, U21, U22, U23, U24, U25, U26, U27, U28, U29, U30, U31, U32, U33, U34, U35, U36, U37, - U38, U39, U40, U41, U42, U43, U44, U45, U46, U47, U48, U49, U50, U51, U52, U53, U54, U55, U56, - U57, U58, U59, U60, U61, U62, U63, U64, U65, U66, U67, U68, U69, U70, U71, U72, U73, U74, U75, - U76, U77, U78, U79, U80, U81, U82, U83, U84, U85, U86, U87, U88, U89, U90, U91, U92, U93, U94, - U95, U96, U97, U98, U99, U100, U101, U102, U103, U104, U105, U106, U107, U108, U109, U110, - U111, U112, U113, U114, U115, U116, U117, U118, U119, U120, U121, U122, U123, U124, U125, U126, - U127 -); - // For values greater than U1023, just use the typenum binary representation directly. 
impl< A: Bit + Any + Debug + Copy + PartialEq + Send + Sync, @@ -379,23 +319,6 @@ impl< { } -impl< - T: Dim, - A: Bit + Any + Debug + Copy + PartialEq + Send + Sync, - B: Bit + Any + Debug + Copy + PartialEq + Send + Sync, - C: Bit + Any + Debug + Copy + PartialEq + Send + Sync, - D: Bit + Any + Debug + Copy + PartialEq + Send + Sync, - E: Bit + Any + Debug + Copy + PartialEq + Send + Sync, - F: Bit + Any + Debug + Copy + PartialEq + Send + Sync, - G: Bit + Any + Debug + Copy + PartialEq + Send + Sync, - > PartialEq - for UInt, A>, B>, C>, D>, E>, F>, G> -{ - fn eq(&self, other: &T) -> bool { - self.value() == other.value() - } -} - impl NamedDim for UInt { @@ -438,14 +361,62 @@ impl PartialEq - for UInt -{ - fn eq(&self, other: &T) -> bool { - self.value() == other.value() - } -} +macro_rules! named_dimension( + ($($D: ident),* $(,)*) => {$( + /// A type level dimension. + #[derive(Debug, Copy, Clone, Hash, Eq)] + #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] + pub struct $D; + + impl Dim for $D { + #[inline] + fn try_to_usize() -> Option { + Some(typenum::$D::to_usize()) + } + + #[inline] + fn from_usize(dim: usize) -> Self { + assert!(dim == typenum::$D::to_usize(), "Mismatched dimension."); + $D + } + + #[inline] + fn value(&self) -> usize { + typenum::$D::to_usize() + } + } + + impl DimName for $D { + type Value = typenum::$D; + + #[inline] + fn name() -> Self { + $D + } + } + + impl NamedDim for typenum::$D { + type Name = $D; + } + + impl IsNotStaticOne for $D { } + + impl PartialEq for $D { + fn eq(&self, other: &T) -> bool { + self.value() == other.value() + } + } + )*} +); + +// We give explicit names to all Unsigned in [0, 128[ +named_dimension!( + U0, /*U1,*/ U2, U3, U4, U5, U6, U7, U8, U9, U10, U11, U12, U13, U14, U15, U16, U17, U18, + U19, U20, U21, U22, U23, U24, U25, U26, U27, U28, U29, U30, U31, U32, U33, U34, U35, U36, U37, + U38, U39, U40, U41, U42, U43, U44, U45, U46, U47, U48, U49, U50, U51, U52, U53, U54, U55, U56, + 
U57, U58, U59, U60, U61, U62, U63, U64, U65, U66, U67, U68, U69, U70, U71, U72, U73, U74, U75, + U76, U77, U78, U79, U80, U81, U82, U83, U84, U85, U86, U87, U88, U89, U90, U91, U92, U93, U94, + U95, U96, U97, U98, U99, U100, U101, U102, U103, U104, U105, U106, U107, U108, U109, U110, + U111, U112, U113, U114, U115, U116, U117, U118, U119, U120, U121, U122, U123, U124, U125, U126, + U127, +); From 403e63dc5eb7af2db8cdca93b6c1f070eb736456 Mon Sep 17 00:00:00 2001 From: "S.Brandeis" Date: Sat, 25 Jan 2020 12:56:17 +0100 Subject: [PATCH 55/67] Remove trailing whitespace --- src/base/matrix.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/base/matrix.rs b/src/base/matrix.rs index 5d60b970..2a87d697 100644 --- a/src/base/matrix.rs +++ b/src/base/matrix.rs @@ -1137,7 +1137,7 @@ impl + IsNotStaticOne, S: Storage where DefaultAllocator: Allocator, DimSum> { assert!(self.is_square(), "Only square matrices can currently be transformed to homogeneous coordinates."); let dim = DimSum::::from_usize(self.nrows() + 1); - let mut res = MatrixN::identity_generic(dim, dim); + let mut res = MatrixN::identity_generic(dim, dim); res.generic_slice_mut::((0, 0), self.data.shape()).copy_from(&self); res } From d35ef06f445af4f0e9dffc9190f4324a866c1f94 Mon Sep 17 00:00:00 2001 From: "S.Brandeis" Date: Sat, 25 Jan 2020 12:57:15 +0100 Subject: [PATCH 56/67] Remove unnecessary PartialEq trait use in PartialEq impl for Matrix --- src/base/matrix.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/base/matrix.rs b/src/base/matrix.rs index 2a87d697..01105524 100644 --- a/src/base/matrix.rs +++ b/src/base/matrix.rs @@ -1347,9 +1347,9 @@ where impl PartialEq> for Matrix where N: Scalar + PartialEq, - C: Dim + PartialEq, + C: Dim, C2: Dim, - R: Dim + PartialEq, + R: Dim, R2: Dim, S: Storage, S2: Storage From 5f4a0c7b1345aaa26ce80128d31a199d097b2cb7 Mon Sep 17 00:00:00 2001 From: sebcrozet Date: Mon, 23 Dec 2019 23:27:20 +0100 Subject: [PATCH 57/67] Fix 
slerp for regular vectors. --- src/base/matrix.rs | 32 ++++++++++++++++---------------- src/geometry/quaternion.rs | 32 ++++++++++++++++++++++++-------- 2 files changed, 40 insertions(+), 24 deletions(-) diff --git a/src/base/matrix.rs b/src/base/matrix.rs index 08c22cbb..7df3cb3b 100644 --- a/src/base/matrix.rs +++ b/src/base/matrix.rs @@ -1616,31 +1616,31 @@ impl> Unit> { +impl> Unit> { /// Computes the spherical linear interpolation between two unit vectors. /// /// # Examples: /// /// ``` - /// # use nalgebra::geometry::UnitQuaternion; + /// # use nalgebra::Vector2; /// - /// let q1 = UnitQuaternion::from_euler_angles(std::f32::consts::FRAC_PI_4, 0.0, 0.0); - /// let q2 = UnitQuaternion::from_euler_angles(-std::f32::consts::PI, 0.0, 0.0); + /// let v1 = Vector2::new(1.0, 2.0); + /// let v2 = Vector2::new(2.0, -3.0); /// - /// let q = q1.slerp(&q2, 1.0 / 3.0); + /// let v = v1.slerp(&v2, 1.0); /// - /// assert_eq!(q.euler_angles(), (std::f32::consts::FRAC_PI_2, 0.0, 0.0)); + /// assert_eq!(v, v2); /// ``` pub fn slerp>( &self, rhs: &Unit>, - t: N::RealField, + t: N, ) -> Unit> where DefaultAllocator: Allocator, { // FIXME: the result is wrong when self and rhs are collinear with opposite direction. - self.try_slerp(rhs, t, N::RealField::default_epsilon()) + self.try_slerp(rhs, t, N::default_epsilon()) .unwrap_or(Unit::new_unchecked(self.clone_owned())) } @@ -1651,30 +1651,30 @@ impl> Unit> { pub fn try_slerp>( &self, rhs: &Unit>, - t: N::RealField, - epsilon: N::RealField, + t: N, + epsilon: N, ) -> Option>> where DefaultAllocator: Allocator, { - let (c_hang, c_hang_sign) = self.dotc(rhs).to_exp(); + let c_hang = self.dot(rhs); // self == other - if c_hang >= N::RealField::one() { + if c_hang >= N::one() { return Some(Unit::new_unchecked(self.clone_owned())); } let hang = c_hang.acos(); - let s_hang = (N::RealField::one() - c_hang * c_hang).sqrt(); + let s_hang = (N::one() - c_hang * c_hang).sqrt(); // FIXME: what if s_hang is 0.0 ? 
The result is not well-defined. - if relative_eq!(s_hang, N::RealField::zero(), epsilon = epsilon) { + if relative_eq!(s_hang, N::zero(), epsilon = epsilon) { None } else { - let ta = ((N::RealField::one() - t) * hang).sin() / s_hang; + let ta = ((N::one() - t) * hang).sin() / s_hang; let tb = (t * hang).sin() / s_hang; let mut res = self.scale(ta); - res.axpy(c_hang_sign.scale(tb), &**rhs, N::one()); + res.axpy(tb, &**rhs, N::one()); Some(Unit::new_unchecked(res)) } diff --git a/src/geometry/quaternion.rs b/src/geometry/quaternion.rs index 40c085f2..a18873e5 100755 --- a/src/geometry/quaternion.rs +++ b/src/geometry/quaternion.rs @@ -1067,13 +1067,22 @@ impl UnitQuaternion { /// /// Panics if the angle between both quaternion is 180 degrees (in which case the interpolation /// is not well-defined). Use `.try_slerp` instead to avoid the panic. + /// + /// # Examples: + /// + /// ``` + /// # use nalgebra::geometry::UnitQuaternion; + /// + /// let q1 = UnitQuaternion::from_euler_angles(std::f32::consts::FRAC_PI_4, 0.0, 0.0); + /// let q2 = UnitQuaternion::from_euler_angles(-std::f32::consts::PI, 0.0, 0.0); + /// + /// let q = q1.slerp(&q2, 1.0 / 3.0); + /// + /// assert_eq!(q.euler_angles(), (std::f32::consts::FRAC_PI_2, 0.0, 0.0)); + /// ``` #[inline] pub fn slerp(&self, other: &Self, t: N) -> Self { - Unit::new_unchecked(Quaternion::from( - Unit::new_unchecked(self.coords) - .slerp(&Unit::new_unchecked(other.coords), t) - .into_inner(), - )) + self.try_slerp(other, t, N::default_epsilon()).expect("Quaternion slerp: ambiguous configuration.") } /// Computes the spherical linear interpolation between two unit quaternions or returns `None` @@ -1094,9 +1103,16 @@ impl UnitQuaternion { epsilon: N, ) -> Option { - Unit::new_unchecked(self.coords) - .try_slerp(&Unit::new_unchecked(other.coords), t, epsilon) - .map(|q| Unit::new_unchecked(Quaternion::from(q.into_inner()))) + let coords = if self.coords.dot(&other.coords) < N::zero() { + Unit::new_unchecked(self.coords) + 
.try_slerp(&Unit::new_unchecked(-other.coords), t, epsilon) + } else { + Unit::new_unchecked(self.coords) + .try_slerp(&Unit::new_unchecked(other.coords), t, epsilon) + }; + + + coords.map(|q| Unit::new_unchecked(Quaternion::from(q.into_inner()))) } /// Compute the conjugate of this unit quaternion in-place. From 08d990f2760a4a4b3d490c4ac5e27aeaa2bfa0d3 Mon Sep 17 00:00:00 2001 From: "S.Brandeis" Date: Sat, 25 Jan 2020 12:58:05 +0100 Subject: [PATCH 58/67] Make shape check a one-liner in impl PartialEq for Matrix --- src/base/matrix.rs | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/src/base/matrix.rs b/src/base/matrix.rs index 01105524..8bc02b21 100644 --- a/src/base/matrix.rs +++ b/src/base/matrix.rs @@ -1356,10 +1356,7 @@ where { #[inline] fn eq(&self, right: &Matrix) -> bool { - if self.shape() == right.shape() { - return self.iter().zip(right.iter()).all(|(l, r)| l == r) - } - false + self.shape() == right.shape() && self.iter().zip(right.iter()).all(|(l, r)| l == r) } } From 8577711e3b1e4bcd044ed993d255c94df19c79d0 Mon Sep 17 00:00:00 2001 From: Alexander Bulaev Date: Mon, 13 Jan 2020 23:30:40 +0300 Subject: [PATCH 59/67] nalgebra-lapack: update lapack-src --- nalgebra-lapack/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nalgebra-lapack/Cargo.toml b/nalgebra-lapack/Cargo.toml index 04abbbbd..817d92ef 100644 --- a/nalgebra-lapack/Cargo.toml +++ b/nalgebra-lapack/Cargo.toml @@ -30,7 +30,7 @@ alga = { version = "0.9", default-features = false } serde = { version = "1.0", optional = true } serde_derive = { version = "1.0", optional = true } lapack = { version = "0.16", default-features = false } -lapack-src = { version = "0.3", default-features = false } +lapack-src = { version = "0.5", default-features = false } # clippy = "*" [dev-dependencies] From e911bfc7db307709709f5d184d851d69bbbc8625 Mon Sep 17 00:00:00 2001 From: sebcrozet Date: Mon, 23 Dec 2019 23:41:18 +0100 Subject: [PATCH 60/67] Fix doc-test for 
vector slerp. --- src/base/matrix.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/base/matrix.rs b/src/base/matrix.rs index 7df3cb3b..5c6693e6 100644 --- a/src/base/matrix.rs +++ b/src/base/matrix.rs @@ -1622,10 +1622,10 @@ impl> Unit> { /// # Examples: /// /// ``` - /// # use nalgebra::Vector2; + /// # use nalgebra::{Unit, Vector2}; /// - /// let v1 = Vector2::new(1.0, 2.0); - /// let v2 = Vector2::new(2.0, -3.0); + /// let v1 = Unit::new_normalize(Vector2::new(1.0, 2.0)); + /// let v2 = Unit::new_normalize(Vector2::new(2.0, -3.0)); /// /// let v = v1.slerp(&v2, 1.0); /// From 6d608cb99f4f3d1125b50ae5d5caf488794508b9 Mon Sep 17 00:00:00 2001 From: "S.Brandeis" Date: Sat, 25 Jan 2020 20:17:49 +0100 Subject: [PATCH 61/67] Revert "Broader PartialEq implementation for types implementing Dim trait" This reverts commit 6f5c9c0f --- src/base/dimension.rs | 122 +++++++++++++++++++----------------------- 1 file changed, 55 insertions(+), 67 deletions(-) diff --git a/src/base/dimension.rs b/src/base/dimension.rs index 17462d41..112996d3 100644 --- a/src/base/dimension.rs +++ b/src/base/dimension.rs @@ -14,7 +14,7 @@ use typenum::{ use serde::{Deserialize, Deserializer, Serialize, Serializer}; /// Dim of dynamically-sized algebraic entities. -#[derive(Clone, Copy, Eq, Debug)] +#[derive(Clone, Copy, Eq, PartialEq, Debug)] pub struct Dynamic { value: usize, } @@ -107,12 +107,6 @@ impl Sub for Dynamic { } } -impl PartialEq for Dynamic { - fn eq(&self, other: &T) -> bool { - self.value() == other.value() - } -} - /* * * Operations. @@ -247,6 +241,60 @@ impl NamedDim for typenum::U1 { type Name = U1; } +macro_rules! named_dimension ( + ($($D: ident),* $(,)*) => {$( + /// A type level dimension. 
+ #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] + #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] + pub struct $D; + + impl Dim for $D { + #[inline] + fn try_to_usize() -> Option { + Some(typenum::$D::to_usize()) + } + + #[inline] + fn from_usize(dim: usize) -> Self { + assert!(dim == typenum::$D::to_usize(), "Mismatched dimension."); + $D + } + + #[inline] + fn value(&self) -> usize { + typenum::$D::to_usize() + } + } + + impl DimName for $D { + type Value = typenum::$D; + + #[inline] + fn name() -> Self { + $D + } + } + + impl NamedDim for typenum::$D { + type Name = $D; + } + + impl IsNotStaticOne for $D { } + )*} +); + +// We give explicit names to all Unsigned in [0, 128[ +named_dimension!( + U0, /*U1,*/ U2, U3, U4, U5, U6, U7, U8, U9, U10, U11, U12, U13, U14, U15, U16, U17, U18, + U19, U20, U21, U22, U23, U24, U25, U26, U27, U28, U29, U30, U31, U32, U33, U34, U35, U36, U37, + U38, U39, U40, U41, U42, U43, U44, U45, U46, U47, U48, U49, U50, U51, U52, U53, U54, U55, U56, + U57, U58, U59, U60, U61, U62, U63, U64, U65, U66, U67, U68, U69, U70, U71, U72, U73, U74, U75, + U76, U77, U78, U79, U80, U81, U82, U83, U84, U85, U86, U87, U88, U89, U90, U91, U92, U93, U94, + U95, U96, U97, U98, U99, U100, U101, U102, U103, U104, U105, U106, U107, U108, U109, U110, + U111, U112, U113, U114, U115, U116, U117, U118, U119, U120, U121, U122, U123, U124, U125, U126, + U127 +); + // For values greater than U1023, just use the typenum binary representation directly. impl< A: Bit + Any + Debug + Copy + PartialEq + Send + Sync, @@ -360,63 +408,3 @@ impl { } - -macro_rules! named_dimension( - ($($D: ident),* $(,)*) => {$( - /// A type level dimension. 
- #[derive(Debug, Copy, Clone, Hash, Eq)] - #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] - pub struct $D; - - impl Dim for $D { - #[inline] - fn try_to_usize() -> Option { - Some(typenum::$D::to_usize()) - } - - #[inline] - fn from_usize(dim: usize) -> Self { - assert!(dim == typenum::$D::to_usize(), "Mismatched dimension."); - $D - } - - #[inline] - fn value(&self) -> usize { - typenum::$D::to_usize() - } - } - - impl DimName for $D { - type Value = typenum::$D; - - #[inline] - fn name() -> Self { - $D - } - } - - impl NamedDim for typenum::$D { - type Name = $D; - } - - impl IsNotStaticOne for $D { } - - impl PartialEq for $D { - fn eq(&self, other: &T) -> bool { - self.value() == other.value() - } - } - )*} -); - -// We give explicit names to all Unsigned in [0, 128[ -named_dimension!( - U0, /*U1,*/ U2, U3, U4, U5, U6, U7, U8, U9, U10, U11, U12, U13, U14, U15, U16, U17, U18, - U19, U20, U21, U22, U23, U24, U25, U26, U27, U28, U29, U30, U31, U32, U33, U34, U35, U36, U37, - U38, U39, U40, U41, U42, U43, U44, U45, U46, U47, U48, U49, U50, U51, U52, U53, U54, U55, U56, - U57, U58, U59, U60, U61, U62, U63, U64, U65, U66, U67, U68, U69, U70, U71, U72, U73, U74, U75, - U76, U77, U78, U79, U80, U81, U82, U83, U84, U85, U86, U87, U88, U89, U90, U91, U92, U93, U94, - U95, U96, U97, U98, U99, U100, U101, U102, U103, U104, U105, U106, U107, U108, U109, U110, - U111, U112, U113, U114, U115, U116, U117, U118, U119, U120, U121, U122, U123, U124, U125, U126, - U127, -); From bfd1a578d4ba4ea3be8ce57f437903e293dfc7ec Mon Sep 17 00:00:00 2001 From: Alexander Bulaev Date: Sun, 19 Jan 2020 18:45:25 +0300 Subject: [PATCH 62/67] ci: link to system BLAS as intended --- ci/build.sh | 4 ++-- ci/test.sh | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/ci/build.sh b/ci/build.sh index 550c9a69..07a2e155 100755 --- a/ci/build.sh +++ b/ci/build.sh @@ -13,7 +13,7 @@ if [ -z "$NO_STD" ]; then cargo build --verbose -p nalgebra --features "debug"; 
cargo build --verbose -p nalgebra --all-features else - cargo build -p nalgebra-lapack; + cargo build --manifest-path nalgebra-lapack/Cargo.toml --features "netlib" --no-default-features; fi else if [ "$CARGO_FEATURES" == "alloc" ]; then @@ -25,4 +25,4 @@ EOF rustup component add rust-src cargo install xargo xargo build --verbose --no-default-features --target=x86_64-unknown-linux-gnu --features "${CARGO_FEATURES}"; -fi \ No newline at end of file +fi diff --git a/ci/test.sh b/ci/test.sh index a3a27fb2..04b298f2 100755 --- a/ci/test.sh +++ b/ci/test.sh @@ -9,6 +9,6 @@ if [ -z "$NO_STD" ]; then cargo test --verbose --all-features; cd nalgebra-glm; cargo test --verbose; else - cd nalgebra-lapack; cargo test --verbose; + cd nalgebra-lapack; cargo test --features "netlib" --no-default-features --verbose; fi -fi \ No newline at end of file +fi From 4d97447cbd29765d10bb24c2d87f7979cd4ecbff Mon Sep 17 00:00:00 2001 From: "S.Brandeis" Date: Sat, 25 Jan 2020 20:24:21 +0100 Subject: [PATCH 63/67] Add type annotations in serde tests --- tests/core/serde.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tests/core/serde.rs b/tests/core/serde.rs index c209e35c..781ace77 100644 --- a/tests/core/serde.rs +++ b/tests/core/serde.rs @@ -14,7 +14,8 @@ macro_rules! test_serde( fn $test() { let v: $ty = rand::random(); let serialized = serde_json::to_string(&v).unwrap(); - assert_eq!(v, serde_json::from_str(&serialized).unwrap()); + let deserialized: $ty = serde_json::from_str(&serialized).unwrap(); + assert_eq!(v, deserialized); } )*} ); @@ -23,7 +24,8 @@ macro_rules! 
test_serde( fn serde_dmatrix() { let v: DMatrix = DMatrix::new_random(3, 4); let serialized = serde_json::to_string(&v).unwrap(); - assert_eq!(v, serde_json::from_str(&serialized).unwrap()); + let deserialized: DMatrix = serde_json::from_str(&serialized).unwrap(); + assert_eq!(v, deserialized); } test_serde!( From a62d9d1ad1f4b261871081c92bdd53f84e43c703 Mon Sep 17 00:00:00 2001 From: Ilya Epifanov Date: Wed, 5 Feb 2020 14:24:27 +0100 Subject: [PATCH 64/67] GivensRotation::new() should default to (I, 0) --- src/linalg/givens.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/linalg/givens.rs b/src/linalg/givens.rs index ed93a83c..20e2c309 100644 --- a/src/linalg/givens.rs +++ b/src/linalg/givens.rs @@ -38,7 +38,8 @@ impl GivensRotation { /// Initializes a Givens rotation from its non-normalized cosine an sine components. pub fn new(c: N, s: N) -> (Self, N) { - Self::try_new(c, s, N::RealField::zero()).unwrap() + Self::try_new(c, s, N::RealField::zero()) + .unwrap_or_else(|| (GivensRotation::identity(), N::zero())) } /// Initializes a Givens rotation form its non-normalized cosine an sine components. From 54f1c717426a700bc52c4dcdd3c6c4ac555b69f8 Mon Sep 17 00:00:00 2001 From: nnmm Date: Sun, 23 Feb 2020 16:30:11 +0100 Subject: [PATCH 65/67] Small docs improvements --- src/geometry/orthographic.rs | 2 +- src/geometry/perspective.rs | 4 ++-- src/geometry/transform_alias.rs | 12 ++++++------ src/lib.rs | 8 ++++---- 4 files changed, 13 insertions(+), 13 deletions(-) diff --git a/src/geometry/orthographic.rs b/src/geometry/orthographic.rs index 2c713118..1ac0d264 100644 --- a/src/geometry/orthographic.rs +++ b/src/geometry/orthographic.rs @@ -16,7 +16,7 @@ use crate::base::{Matrix4, Vector, Vector3}; use crate::geometry::{Point3, Projective3}; -/// A 3D orthographic projection stored as an homogeneous 4x4 matrix. +/// A 3D orthographic projection stored as a homogeneous 4x4 matrix. 
pub struct Orthographic3 { matrix: Matrix4, } diff --git a/src/geometry/perspective.rs b/src/geometry/perspective.rs index 8020c0cf..45bb7aad 100644 --- a/src/geometry/perspective.rs +++ b/src/geometry/perspective.rs @@ -17,7 +17,7 @@ use crate::base::{Matrix4, Scalar, Vector, Vector3}; use crate::geometry::{Point3, Projective3}; -/// A 3D perspective projection stored as an homogeneous 4x4 matrix. +/// A 3D perspective projection stored as a homogeneous 4x4 matrix. pub struct Perspective3 { matrix: Matrix4, } @@ -89,7 +89,7 @@ impl Perspective3 { /// Wraps the given matrix to interpret it as a 3D perspective matrix. /// - /// It is not checked whether or not the given matrix actually represents an orthographic + /// It is not checked whether or not the given matrix actually represents a perspective /// projection. #[inline] pub fn from_matrix_unchecked(matrix: Matrix4) -> Self { diff --git a/src/geometry/transform_alias.rs b/src/geometry/transform_alias.rs index 1ccc5f95..58a5ee1e 100644 --- a/src/geometry/transform_alias.rs +++ b/src/geometry/transform_alias.rs @@ -2,16 +2,16 @@ use crate::base::dimension::{U2, U3}; use crate::geometry::{TAffine, TGeneral, TProjective, Transform}; -/// A 2D general transformation that may not be invertible. Stored as an homogeneous 3x3 matrix. +/// A 2D general transformation that may not be invertible. Stored as a homogeneous 3x3 matrix. pub type Transform2 = Transform; -/// An invertible 2D general transformation. Stored as an homogeneous 3x3 matrix. +/// An invertible 2D general transformation. Stored as a homogeneous 3x3 matrix. pub type Projective2 = Transform; -/// A 2D affine transformation. Stored as an homogeneous 3x3 matrix. +/// A 2D affine transformation. Stored as a homogeneous 3x3 matrix. pub type Affine2 = Transform; -/// A 3D general transformation that may not be inversible. Stored as an homogeneous 4x4 matrix. +/// A 3D general transformation that may not be inversible. Stored as a homogeneous 4x4 matrix. 
pub type Transform3 = Transform; -/// An invertible 3D general transformation. Stored as an homogeneous 4x4 matrix. +/// An invertible 3D general transformation. Stored as a homogeneous 4x4 matrix. pub type Projective3 = Transform; -/// A 3D affine transformation. Stored as an homogeneous 4x4 matrix. +/// A 3D affine transformation. Stored as a homogeneous 4x4 matrix. pub type Affine3 = Transform; diff --git a/src/lib.rs b/src/lib.rs index 0a8bfb07..055b10ff 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -51,7 +51,7 @@ an optimized set of tools for computer graphics and physics. Those features incl allocated on the heap. * Convenient aliases for low-dimensional matrices and vectors: `Vector1` to `Vector6` and `Matrix1x1` to `Matrix6x6`, including rectangular matrices like `Matrix2x5`. -* Points sizes known at compile time, and convenience aliases:: `Point1` to `Point6`. +* Points sizes known at compile time, and convenience aliases: `Point1` to `Point6`. * Translation (seen as a transformation that composes by multiplication): `Translation2`, `Translation3`. * Rotation matrices: `Rotation2`, `Rotation3`. @@ -60,10 +60,10 @@ an optimized set of tools for computer graphics and physics. Those features incl * Algebraic entities with a norm equal to one: `Unit`, e.g., `Unit>`. * Isometries (translation ⨯ rotation): `Isometry2`, `Isometry3` * Similarity transformations (translation ⨯ rotation ⨯ uniform scale): `Similarity2`, `Similarity3`. -* Affine transformations stored as an homogeneous matrix: `Affine2`, `Affine3`. -* Projective (i.e. invertible) transformations stored as an homogeneous matrix: `Projective2`, +* Affine transformations stored as a homogeneous matrix: `Affine2`, `Affine3`. +* Projective (i.e. invertible) transformations stored as a homogeneous matrix: `Projective2`, `Projective3`. 
-* General transformations that does not have to be invertible, stored as an homogeneous matrix: +* General transformations that does not have to be invertible, stored as a homogeneous matrix: `Transform2`, `Transform3`. * 3D projections for computer graphics: `Perspective3`, `Orthographic3`. * Matrix factorizations: `Cholesky`, `QR`, `LU`, `FullPivLU`, `SVD`, `Schur`, `Hessenberg`, `SymmetricEigen`. From b09d9770f7720650f25fa23563e432ed938891ed Mon Sep 17 00:00:00 2001 From: sebcrozet Date: Wed, 1 Jan 2020 15:59:46 +0100 Subject: [PATCH 66/67] Add a method to set the magnitude of a vector. --- src/base/norm.rs | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/src/base/norm.rs b/src/base/norm.rs index 93319ddc..629fd297 100644 --- a/src/base/norm.rs +++ b/src/base/norm.rs @@ -185,6 +185,21 @@ impl> Matrix { self.norm_squared() } + + /// Sets the magnitude of this vector unless it is smaller than `min_magnitude`. + /// + /// If `self.magnitude()` is smaller than `min_magnitude`, it will be left unchanged. + /// Otherwise this is equivalent to: `*self = self.normalize() * magnitude. + #[inline] + pub fn try_set_magnitude(&mut self, magnitude: N::RealField, min_magnitude: N::RealField) + where S: StorageMut { + let n = self.norm(); + + if n >= min_magnitude { + self.scale_mut(magnitude / n) + } + } + /// Returns a normalized version of this matrix. #[inline] pub fn normalize(&self) -> MatrixMN @@ -225,7 +240,7 @@ impl> Matrix /// Normalizes this matrix in-place or does nothing if its norm is smaller or equal to `eps`. /// - /// If the normalization succeeded, returns the old normal of this matrix. + /// If the normalization succeeded, returns the old norm of this matrix. 
#[inline] pub fn try_normalize_mut(&mut self, min_norm: N::RealField) -> Option { let n = self.norm(); From fb69a42878b9abb9264ca3c4912388b0ed968dda Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Crozet?= Date: Mon, 2 Mar 2020 12:39:05 +0100 Subject: [PATCH 67/67] Release v0.20.0. --- Cargo.toml | 2 +- examples/cargo/Cargo.toml | 2 +- nalgebra-glm/Cargo.toml | 4 ++-- nalgebra-lapack/Cargo.toml | 6 +++--- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 1569b0a6..f25718e3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "nalgebra" -version = "0.19.0" +version = "0.20.0" authors = [ "Sébastien Crozet " ] description = "Linear algebra library with transformations and statically-sized or dynamically-sized matrices." diff --git a/examples/cargo/Cargo.toml b/examples/cargo/Cargo.toml index 6f450ec7..95ec21c8 100644 --- a/examples/cargo/Cargo.toml +++ b/examples/cargo/Cargo.toml @@ -4,7 +4,7 @@ version = "0.0.0" authors = [ "You" ] [dependencies] -nalgebra = "0.11.0" +nalgebra = "0.20.0" [[bin]] name = "example" diff --git a/nalgebra-glm/Cargo.toml b/nalgebra-glm/Cargo.toml index fbc8e4ca..7c4aacb2 100644 --- a/nalgebra-glm/Cargo.toml +++ b/nalgebra-glm/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "nalgebra-glm" -version = "0.5.0" +version = "0.6.0" authors = ["sebcrozet "] description = "A computer-graphics oriented API for nalgebra, inspired by the C++ GLM library." 
@@ -25,4 +25,4 @@ abomonation-serialize = [ "nalgebra/abomonation-serialize" ] num-traits = { version = "0.2", default-features = false } approx = { version = "0.3", default-features = false } alga = { version = "0.9", default-features = false } -nalgebra = { path = "..", version = "0.19", default-features = false } +nalgebra = { path = "..", version = "0.20", default-features = false } diff --git a/nalgebra-lapack/Cargo.toml b/nalgebra-lapack/Cargo.toml index 817d92ef..d00b31d9 100644 --- a/nalgebra-lapack/Cargo.toml +++ b/nalgebra-lapack/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "nalgebra-lapack" -version = "0.11.0" +version = "0.12.0" authors = [ "Sébastien Crozet ", "Andrew Straw " ] description = "Linear algebra library with transformations and satically-sized or dynamically-sized matrices." @@ -23,7 +23,7 @@ accelerate = ["lapack-src/accelerate"] intel-mkl = ["lapack-src/intel-mkl"] [dependencies] -nalgebra = { version = "0.19", path = ".." } +nalgebra = { version = "0.20", path = ".." } num-traits = "0.2" num-complex = { version = "0.2", default-features = false } alga = { version = "0.9", default-features = false } @@ -34,7 +34,7 @@ lapack-src = { version = "0.5", default-features = false } # clippy = "*" [dev-dependencies] -nalgebra = { version = "0.19", path = "..", features = [ "arbitrary" ] } +nalgebra = { version = "0.20", path = "..", features = [ "arbitrary" ] } quickcheck = "0.9" approx = "0.3" rand = "0.7"