From 8c6ebf2757403a6c6e018178215e464f2dce9b8d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?S=C3=A9bastien=20Crozet?=
Date: Mon, 2 Aug 2021 18:41:46 +0200
Subject: [PATCH] Implement the single-allocator-trait approach.

---
 CHANGELOG.md | 7 +
 benches/core/matrix.rs | 9 +-
 nalgebra-lapack/src/cholesky.rs | 10 +-
 nalgebra-lapack/src/eigen.rs | 45 +-
 nalgebra-lapack/src/hessenberg.rs | 5 +-
 nalgebra-lapack/src/lib.rs | 1 -
 nalgebra-lapack/src/lu.rs | 16 +-
 nalgebra-lapack/src/qr.rs | 13 +-
 nalgebra-lapack/src/schur.rs | 14 +-
 nalgebra-lapack/src/svd.rs | 15 +-
 nalgebra-lapack/src/symmetric_eigen.rs | 5 +-
 nalgebra-sparse/src/convert/impl_std_ops.rs | 26 +-
 nalgebra-sparse/src/convert/serial.rs | 18 +-
 nalgebra-sparse/src/coo.rs | 2 +-
 nalgebra-sparse/src/ops/impl_std_ops.rs | 14 +-
 nalgebra-sparse/src/ops/serial/cs.rs | 2 +-
 nalgebra-sparse/src/ops/serial/csc.rs | 2 +-
 nalgebra-sparse/src/ops/serial/csr.rs | 2 +-
 nalgebra-sparse/src/pattern.rs | 2 +-
 src/base/alias.rs | 13 +-
 src/base/allocator.rs | 91 +-
 src/base/array_storage.rs | 118 +--
 src/base/blas.rs | 700 +++----------
 src/base/blas_uninit.rs | 359 +++++++
 src/base/construction.rs | 249 +++--
 src/base/construction_slice.rs | 20 +-
 src/base/conversion.rs | 208 ++--
 src/base/coordinates.rs | 16 +-
 src/base/default_allocator.rs | 431 +++-----
 src/base/dimension.rs | 23 +-
 src/base/edition.rs | 199 ++--
 src/base/indexing.rs | 74 +-
 src/base/iter.rs | 45 +-
 src/base/matrix.rs | 980 ++++++++-----
 src/base/matrix_simba.rs | 6 +-
 src/base/matrix_slice.rs | 217 ++--
 src/base/min_max.rs | 8 +-
 src/base/mod.rs | 3 +
 src/base/norm.rs | 9 +-
 src/base/ops.rs | 307 +++---
 src/base/properties.rs | 5 +-
 src/base/scalar.rs | 29 +-
 src/base/statistics.rs | 39 +-
 src/base/storage.rs | 74 +-
 src/base/swizzle.rs | 8 +-
 src/base/uninit.rs | 76 ++
 src/base/unit.rs | 18 +-
 src/base/vec_storage.rs | 169 ++--
 src/debug/random_orthogonal.rs | 40 +-
 src/debug/random_sdp.rs | 37 +-
 src/geometry/dual_quaternion.rs | 51 +-
 src/geometry/dual_quaternion_construction.rs | 8 +-
 src/geometry/dual_quaternion_conversion.rs | 6 +-
 src/geometry/dual_quaternion_ops.rs | 12 +-
 src/geometry/isometry.rs | 28 +-
 src/geometry/isometry_construction.rs | 4 +-
 src/geometry/orthographic.rs | 70 +-
 src/geometry/perspective.rs | 5 +-
 src/geometry/point.rs | 135 +--
 src/geometry/point_construction.rs | 40 +-
 src/geometry/point_conversion.rs | 109 ++-
 src/geometry/point_coordinates.rs | 6 +-
 src/geometry/point_ops.rs | 4 +-
 src/geometry/point_simba.rs | 4 +-
 src/geometry/quaternion.rs | 28 +-
 src/geometry/quaternion_construction.rs | 8 +-
 src/geometry/quaternion_conversion.rs | 10 +-
 src/geometry/quaternion_coordinates.rs | 5 +-
 src/geometry/quaternion_ops.rs | 10 +-
 src/geometry/reflection.rs | 20 +-
 src/geometry/rotation.rs | 31 +-
 src/geometry/rotation_specialization.rs | 8 +-
 src/geometry/similarity.rs | 7 +-
 src/geometry/similarity_construction.rs | 4 +-
 src/geometry/transform.rs | 87 +-
 src/geometry/transform_ops.rs | 7 +-
 src/geometry/translation.rs | 41 +-
 src/geometry/translation_construction.rs | 4 +-
 src/geometry/translation_conversion.rs | 14 +-
 src/geometry/translation_coordinates.rs | 4 +-
 src/lib.rs | 2 +-
 src/linalg/balancing.rs | 3 +-
 src/linalg/bidiagonal.rs | 190 ++--
 src/linalg/cholesky.rs | 84 +-
 src/linalg/col_piv_qr.rs | 67 +-
 src/linalg/convolution.rs | 6 +-
 src/linalg/exp.rs | 10 +-
 src/linalg/full_piv_lu.rs | 45 +-
 src/linalg/hessenberg.rs | 97 +-
 src/linalg/householder.rs | 53 +-
 src/linalg/lu.rs | 55 +-
 src/linalg/permutation_sequence.rs | 85 +-
 src/linalg/pow.rs | 22 +-
 src/linalg/qr.rs | 79 +-
 src/linalg/schur.rs | 71 +-
 src/linalg/svd.rs | 14 +-
 src/linalg/symmetric_eigen.rs | 46 +-
 src/linalg/symmetric_tridiagonal.rs | 58 +-
 src/linalg/udu.rs | 44 +-
 src/proptest/mod.rs | 11 +-
 src/sparse/cs_matrix.rs | 8 +-
 src/sparse/cs_matrix_cholesky.rs | 8 +-
 src/sparse/cs_matrix_ops.rs | 4 +-
 src/sparse/cs_matrix_solve.rs | 2 +-
 src/third_party/alga/alga_matrix.rs | 20 +-
 src/third_party/glam/common/glam_matrix.rs | 22 +-
 src/third_party/mint/mint_matrix.rs | 11 +-
 src/third_party/mint/mint_point.rs | 2 +-
 src/third_party/mint/mint_quaternion.rs | 2 +-
 tests/core/matrix.rs | 2 +-
 110 files changed, 2877 insertions(+), 3795 deletions(-)
 create mode 100644 src/base/blas_uninit.rs
 create mode 100644 src/base/uninit.rs

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 04ea1c34..5af293ab 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,6 +4,13 @@ documented here.
 
 This project adheres to [Semantic Versioning](https://semver.org/).
 
+## [0.29.0]
+### Modified
+- The closure given to `apply`, `zip_apply`, `zip_zip_apply` must now modify the
+  first argument in place, instead of returning a new value. This makes these
+  methods more versatile and avoids useless clones when using non-`Copy` scalar
+  types.
+
 ## [0.28.0]
 ### Added
 - Implement `Hash` for `Transform`.
diff --git a/benches/core/matrix.rs b/benches/core/matrix.rs
index d13d54e9..3c483c35 100644
--- a/benches/core/matrix.rs
+++ b/benches/core/matrix.rs
@@ -1,7 +1,4 @@
-use na::{
-    Const, DMatrix, DVector, Dynamic, Matrix2, Matrix3, Matrix4, OMatrix, Vector2, Vector3,
-    Vector4, U10,
-};
+use na::{DMatrix, DVector, Matrix2, Matrix3, Matrix4, OMatrix, Vector2, Vector3, Vector4, U10};
 use rand::Rng;
 use rand_isaac::IsaacRng;
 use std::ops::{Add, Div, Mul, Sub};
@@ -189,7 +186,7 @@ fn axpy(bench: &mut criterion::Criterion) {
 fn tr_mul_to(bench: &mut criterion::Criterion) {
     let a = DMatrix::<f64>::new_random(1000, 1000);
     let b = DVector::<f64>::new_random(1000);
-    let mut c = DVector::new_uninitialized_generic(Dynamic::new(1000), Const::<1>);
+    let mut c = DVector::from_element(1000, 0.0);
 
     bench.bench_function("tr_mul_to", move |bh| bh.iter(|| a.tr_mul_to(&b, &mut c)));
 }
@@ -197,7 +194,7 @@ fn mat_mul_mat(bench: &mut criterion::Criterion) {
     let a = DMatrix::<f64>::new_random(100, 100);
     let b = DMatrix::<f64>::new_random(100, 100);
-    let mut ab = DMatrix::new_uninitialized_generic(Dynamic::new(100), Dynamic::new(100));
+    let mut ab = DMatrix::<f64>::from_element(100, 100, 0.0);
 
     bench.bench_function("mat_mul_mat", move |bh| {
         bh.iter(|| {
diff --git a/nalgebra-lapack/src/cholesky.rs b/nalgebra-lapack/src/cholesky.rs
index 929f2d40..ea4b1d94 100644
--- a/nalgebra-lapack/src/cholesky.rs
+++ b/nalgebra-lapack/src/cholesky.rs
@@ -6,7 +6,7 @@ use num_complex::Complex;
 
 use na::allocator::Allocator;
 use na::dimension::Dim;
-use na::storage::Storage;
+use na::storage::RawStorage;
 use na::{DefaultAllocator, Matrix, OMatrix, Scalar};
 
 use lapack;
@@ -24,17 +24,17 @@ use lapack;
                        OMatrix: Deserialize<'de>"))
 )]
 #[derive(Clone, Debug)]
-pub struct Cholesky
+pub struct Cholesky
 where
     DefaultAllocator: Allocator,
 {
     l: OMatrix,
 }
 
-impl Copy for Cholesky
+impl Copy for Cholesky
 where
     DefaultAllocator: Allocator,
-    Owned: Copy,
+    OMatrix: Copy,
 {
 }
@@ -104,7 +104,7 @@
         b: &Matrix,
     ) -> Option>
     where
-        S2: Storage,
+        S2: RawStorage,
         DefaultAllocator: Allocator,
     {
         let mut res = b.clone_owned();
diff --git a/nalgebra-lapack/src/eigen.rs b/nalgebra-lapack/src/eigen.rs
index 49fb72b4..a8f87d85
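Aside: the `apply` change described in the changelog entry above is easiest to see in code. A minimal sketch, assuming the `FnMut(&mut T)` closure signature this release introduces (earlier releases took `FnMut(T) -> T`):

```rust
use nalgebra::Matrix2;

fn main() {
    let mut m = Matrix2::new(1.0, 2.0,
                             3.0, 4.0);
    // 0.28 and earlier: the closure returned a fresh value.
    //     m.apply(|e| e * 2.0);
    // 0.29: the closure mutates its argument in place, which avoids
    // cloning non-Copy scalar types.
    m.apply(|e| *e *= 2.0);
    assert_eq!(m, Matrix2::new(2.0, 4.0, 6.0, 8.0));
}
```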
100644 --- a/nalgebra-lapack/src/eigen.rs +++ b/nalgebra-lapack/src/eigen.rs @@ -1,5 +1,3 @@ -use std::fmt; - #[cfg(feature = "serde-serialize")] use serde::{Deserialize, Serialize}; @@ -11,7 +9,7 @@ use simba::scalar::RealField; use crate::ComplexHelper; use na::allocator::Allocator; use na::dimension::{Const, Dim}; -use na::storage::Storage; +use na::storage::RawStorage; use na::{DefaultAllocator, Matrix, OMatrix, OVector, Scalar}; use lapack; @@ -34,7 +32,8 @@ use lapack; OMatrix: Deserialize<'de>") ) )] -pub struct Eigen +#[derive(Clone, Debug)] +pub struct Eigen where DefaultAllocator: Allocator + Allocator, { @@ -46,7 +45,7 @@ where pub left_eigenvectors: Option>, } -impl Copy for Eigen +impl Copy for Eigen where DefaultAllocator: Allocator + Allocator, OVector: Copy, @@ -54,36 +53,6 @@ where { } -impl Clone for Eigen -where - DefaultAllocator: Allocator + Allocator, - OVector: Clone, - OMatrix: Clone, -{ - fn clone(&self) -> Self { - Self { - eigenvalues: self.eigenvalues.clone(), - eigenvectors: self.eigenvectors.clone(), - left_eigenvectors: self.left_eigenvectors.clone(), - } - } -} - -impl fmt::Debug for Eigen -where - DefaultAllocator: Allocator + Allocator, - OVector: fmt::Debug, - OMatrix: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("Eigen") - .field("eigenvalues", &self.eigenvalues) - .field("eigenvectors", &self.eigenvectors) - .field("left_eigenvectors", &self.left_eigenvectors) - .finish() - } -} - impl Eigen where DefaultAllocator: Allocator + Allocator, @@ -104,13 +73,11 @@ where let ljob = if left_eigenvectors { b'V' } else { b'T' }; let rjob = if eigenvectors { b'V' } else { b'T' }; - let (nrows, ncols) = m.data.shape(); + let (nrows, ncols) = m.shape_generic(); let n = nrows.value(); let lda = n as i32; - // IMPORTANT TODO: this is still UB. - let mut wr = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() }; // TODO: Tap into the workspace. let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() }; @@ -275,7 +242,7 @@ where "Unable to compute the eigenvalue decomposition of a non-square matrix." ); - let nrows = m.data.shape().0; + let nrows = m.shape_generic().0; let n = nrows.value(); let lda = n as i32; diff --git a/nalgebra-lapack/src/hessenberg.rs b/nalgebra-lapack/src/hessenberg.rs index dab38c40..7f854cb6 100644 --- a/nalgebra-lapack/src/hessenberg.rs +++ b/nalgebra-lapack/src/hessenberg.rs @@ -4,7 +4,7 @@ use num_complex::Complex; use crate::ComplexHelper; use na::allocator::Allocator; use na::dimension::{Const, DimDiff, DimSub, U1}; -use na::storage::Storage; +use na::storage::RawStorage; use na::{DefaultAllocator, Matrix, OMatrix, OVector, Scalar}; use lapack; @@ -48,7 +48,7 @@ where { /// Computes the hessenberg decomposition of the matrix `m`. pub fn new(mut m: OMatrix) -> Self { - let nrows = m.data.shape().0; + let nrows = m.shape_generic().0; let n = nrows.value() as i32; assert!( @@ -60,7 +60,6 @@ where "Unable to compute the hessenberg decomposition of an empty matrix." ); - // IMPORTANT TODO: this is still UB. let mut tau = unsafe { Matrix::new_uninitialized_generic(nrows.sub(Const::<1>), Const::<1>).assume_init() }; diff --git a/nalgebra-lapack/src/lib.rs b/nalgebra-lapack/src/lib.rs index fccf2717..9a027772 100644 --- a/nalgebra-lapack/src/lib.rs +++ b/nalgebra-lapack/src/lib.rs @@ -140,7 +140,6 @@ impl ComplexHelper for Complex { } } -// This is UB. 
unsafe fn uninitialized_vec(n: usize) -> Vec { let mut res = Vec::new(); res.reserve_exact(n); diff --git a/nalgebra-lapack/src/lu.rs b/nalgebra-lapack/src/lu.rs index 2130fc7e..7d4a5a43 100644 --- a/nalgebra-lapack/src/lu.rs +++ b/nalgebra-lapack/src/lu.rs @@ -4,7 +4,7 @@ use num_complex::Complex; use crate::ComplexHelper; use na::allocator::Allocator; use na::dimension::{Const, Dim, DimMin, DimMinimum}; -use na::storage::Storage; +use na::storage::RawStorage; use na::{DefaultAllocator, Matrix, OMatrix, OVector, Scalar}; use lapack; @@ -61,7 +61,7 @@ where { /// Computes the LU decomposition with partial (row) pivoting of `matrix`. pub fn new(mut m: OMatrix) -> Self { - let (nrows, ncols) = m.data.shape(); + let (nrows, ncols) = m.shape_generic(); let min_nrows_ncols = nrows.min(ncols); let nrows = nrows.value() as i32; let ncols = ncols.value() as i32; @@ -87,7 +87,7 @@ where #[inline] #[must_use] pub fn l(&self) -> OMatrix> { - let (nrows, ncols) = self.lu.data.shape(); + let (nrows, ncols) = self.lu.shape_generic(); let mut res = self.lu.columns_generic(0, nrows.min(ncols)).into_owned(); res.fill_upper_triangle(Zero::zero(), 1); @@ -100,7 +100,7 @@ where #[inline] #[must_use] pub fn u(&self) -> OMatrix, C> { - let (nrows, ncols) = self.lu.data.shape(); + let (nrows, ncols) = self.lu.shape_generic(); let mut res = self.lu.rows_generic(0, nrows.min(ncols)).into_owned(); res.fill_lower_triangle(Zero::zero(), 1); @@ -115,7 +115,7 @@ where #[inline] #[must_use] pub fn p(&self) -> OMatrix { - let (dim, _) = self.lu.data.shape(); + let (dim, _) = self.lu.shape_generic(); let mut id = Matrix::identity_generic(dim, dim); self.permute(&mut id); @@ -191,7 +191,7 @@ where b: &Matrix, ) -> Option> where - S2: Storage, + S2: RawStorage, DefaultAllocator: Allocator + Allocator, { let mut res = b.clone_owned(); @@ -209,7 +209,7 @@ where b: &Matrix, ) -> Option> where - S2: Storage, + S2: RawStorage, DefaultAllocator: Allocator + Allocator, { let mut res = b.clone_owned(); @@ -227,7 +227,7 @@ where b: &Matrix, ) -> Option> where - S2: Storage, + S2: RawStorage, DefaultAllocator: Allocator + Allocator, { let mut res = b.clone_owned(); diff --git a/nalgebra-lapack/src/qr.rs b/nalgebra-lapack/src/qr.rs index 2209b86e..dc4d81d7 100644 --- a/nalgebra-lapack/src/qr.rs +++ b/nalgebra-lapack/src/qr.rs @@ -7,7 +7,7 @@ use num_complex::Complex; use crate::ComplexHelper; use na::allocator::Allocator; use na::dimension::{Const, Dim, DimMin, DimMinimum}; -use na::storage::Storage; +use na::storage::RawStorage; use na::{DefaultAllocator, Matrix, OMatrix, OVector, Scalar}; use lapack; @@ -54,11 +54,12 @@ where { /// Computes the QR decomposition of the matrix `m`. 
pub fn new(mut m: OMatrix) -> Self { - let (nrows, ncols) = m.data.shape(); + let (nrows, ncols) = m.shape_generic(); let mut info = 0; - let mut tau = - unsafe { Matrix::new_uninitialized_generic(nrows.min(ncols), U1).assume_init() }; + let mut tau = unsafe { + Matrix::new_uninitialized_generic(nrows.min(ncols), Const::<1>).assume_init() + }; if nrows.value() == 0 || ncols.value() == 0 { return Self { qr: m, tau }; @@ -93,7 +94,7 @@ where #[inline] #[must_use] pub fn r(&self) -> OMatrix, C> { - let (nrows, ncols) = self.qr.data.shape(); + let (nrows, ncols) = self.qr.shape_generic(); self.qr.rows_generic(0, nrows.min(ncols)).upper_triangle() } } @@ -119,7 +120,7 @@ where #[inline] #[must_use] pub fn q(&self) -> OMatrix> { - let (nrows, ncols) = self.qr.data.shape(); + let (nrows, ncols) = self.qr.shape_generic(); let min_nrows_ncols = nrows.min(ncols); if min_nrows_ncols.value() == 0 { diff --git a/nalgebra-lapack/src/schur.rs b/nalgebra-lapack/src/schur.rs index 7325bb8f..9543fea2 100644 --- a/nalgebra-lapack/src/schur.rs +++ b/nalgebra-lapack/src/schur.rs @@ -9,7 +9,7 @@ use simba::scalar::RealField; use crate::ComplexHelper; use na::allocator::Allocator; use na::dimension::{Const, Dim}; -use na::storage::Storage; +use na::storage::RawStorage; use na::{DefaultAllocator, Matrix, OMatrix, OVector, Scalar}; use lapack; @@ -71,7 +71,7 @@ where "Unable to compute the eigenvalue decomposition of a non-square matrix." ); - let (nrows, ncols) = m.data.shape(); + let (nrows, ncols) = m.shape_generic(); let n = nrows.value(); let lda = n as i32; @@ -153,15 +153,15 @@ where where DefaultAllocator: Allocator, D>, { - let mut out = - unsafe { OVector::new_uninitialized_generic(self.t.data.shape().0, Const::<1>) }; + let mut out = unsafe { + OVector::new_uninitialized_generic(self.t.shape_generic().0, Const::<1>).assume_init() + }; for i in 0..out.len() { - out[i] = MaybeUninit::new(Complex::new(self.re[i], self.im[i])); + out[i] = Complex::new(self.re[i], self.im[i]) } - // Safety: all entries have been initialized. - unsafe { out.assume_init() } + out } } diff --git a/nalgebra-lapack/src/svd.rs b/nalgebra-lapack/src/svd.rs index 5bf4758a..872c368d 100644 --- a/nalgebra-lapack/src/svd.rs +++ b/nalgebra-lapack/src/svd.rs @@ -6,7 +6,7 @@ use std::cmp; use na::allocator::Allocator; use na::dimension::{Const, Dim, DimMin, DimMinimum, U1}; -use na::storage::Storage; +use na::storage::RawStorage; use na::{DefaultAllocator, Matrix, OMatrix, OVector, Scalar}; use lapack; @@ -89,7 +89,7 @@ macro_rules! svd_impl( Allocator<$t, DimMinimum> { fn compute(mut m: OMatrix<$t, R, C>) -> Option> { - let (nrows, ncols) = m.data.shape(); + let (nrows, ncols) = m.shape_generic(); if nrows.value() == 0 || ncols.value() == 0 { return None; @@ -99,7 +99,6 @@ macro_rules! svd_impl( let lda = nrows.value() as i32; - // IMPORTANT TODO: this is still UB. let mut u = unsafe { Matrix::new_uninitialized_generic(nrows, nrows).assume_init() }; let mut s = unsafe { Matrix::new_uninitialized_generic(nrows.min(ncols), Const::<1>).assume_init() }; let mut vt = unsafe { Matrix::new_uninitialized_generic(ncols, ncols).assume_init() }; @@ -152,8 +151,8 @@ macro_rules! svd_impl( /// been manually changed by the user. 
#[inline] pub fn recompose(self) -> OMatrix<$t, R, C> { - let nrows = self.u.data.shape().0; - let ncols = self.vt.data.shape().1; + let nrows = self.u.shape_generic().0; + let ncols = self.vt.shape_generic().1; let min_nrows_ncols = nrows.min(ncols); let mut res: OMatrix<_, R, C> = Matrix::zeros_generic(nrows, ncols); @@ -178,8 +177,8 @@ macro_rules! svd_impl( #[inline] #[must_use] pub fn pseudo_inverse(&self, epsilon: $t) -> OMatrix<$t, C, R> { - let nrows = self.u.data.shape().0; - let ncols = self.vt.data.shape().1; + let nrows = self.u.shape_generic().0; + let ncols = self.vt.shape_generic().1; let min_nrows_ncols = nrows.min(ncols); let mut res: OMatrix<_, C, R> = Matrix::zeros_generic(ncols, nrows); @@ -242,7 +241,7 @@ macro_rules! svd_complex_impl( Allocator, R, R> + Allocator, C, C> + Allocator<$t, DimMinimum> { - let (nrows, ncols) = m.data.shape(); + let (nrows, ncols) = m.shape_generic(); if nrows.value() == 0 || ncols.value() == 0 { return None; diff --git a/nalgebra-lapack/src/symmetric_eigen.rs b/nalgebra-lapack/src/symmetric_eigen.rs index 7a1f6f2e..f70e9a4d 100644 --- a/nalgebra-lapack/src/symmetric_eigen.rs +++ b/nalgebra-lapack/src/symmetric_eigen.rs @@ -9,7 +9,7 @@ use simba::scalar::RealField; use crate::ComplexHelper; use na::allocator::Allocator; use na::dimension::{Const, Dim}; -use na::storage::Storage; +use na::storage::RawStorage; use na::{DefaultAllocator, Matrix, OMatrix, OVector, Scalar}; use lapack; @@ -89,12 +89,11 @@ where let jobz = if eigenvectors { b'V' } else { b'T' }; - let nrows = m.data.shape().0; + let nrows = m.shape_generic().0; let n = nrows.value(); let lda = n as i32; - // IMPORTANT TODO: this is still UB. let mut values = unsafe { Matrix::new_uninitialized_generic(nrows, Const::<1>).assume_init() }; let mut info = 0; diff --git a/nalgebra-sparse/src/convert/impl_std_ops.rs b/nalgebra-sparse/src/convert/impl_std_ops.rs index d775fa13..683227e2 100644 --- a/nalgebra-sparse/src/convert/impl_std_ops.rs +++ b/nalgebra-sparse/src/convert/impl_std_ops.rs @@ -2,14 +2,16 @@ use crate::convert::serial::*; use crate::coo::CooMatrix; use crate::csc::CscMatrix; use crate::csr::CsrMatrix; -use nalgebra::storage::Storage; +use nalgebra::storage::RawStorage; use nalgebra::{ClosedAdd, DMatrix, Dim, Matrix, Scalar}; use num_traits::Zero; -impl<'a, T, R: Dim, C: Dim, S> From<&'a Matrix> for CooMatrix +impl<'a, T, R, C, S> From<&'a Matrix> for CooMatrix where - T: Scalar + Zero + PartialEq, - S: Storage, + T: Scalar + Zero, + R: Dim, + C: Dim, + S: RawStorage, { fn from(matrix: &'a Matrix) -> Self { convert_dense_coo(matrix) @@ -43,10 +45,12 @@ where } } -impl<'a, T, R: Dim, C: Dim, S> From<&'a Matrix> for CsrMatrix +impl<'a, T, R, C, S> From<&'a Matrix> for CsrMatrix where - T: Scalar + Zero + PartialEq, - S: Storage, + T: Scalar + Zero, + R: Dim, + C: Dim, + S: RawStorage, { fn from(matrix: &'a Matrix) -> Self { convert_dense_csr(matrix) @@ -80,10 +84,12 @@ where } } -impl<'a, T, R: Dim, C: Dim, S> From<&'a Matrix> for CscMatrix +impl<'a, T, R, C, S> From<&'a Matrix> for CscMatrix where - T: Scalar + Zero + PartialEq, - S: Storage, + T: Scalar + Zero, + R: Dim, + C: Dim, + S: RawStorage, { fn from(matrix: &'a Matrix) -> Self { convert_dense_csc(matrix) diff --git a/nalgebra-sparse/src/convert/serial.rs b/nalgebra-sparse/src/convert/serial.rs index ebdf4e65..f84a6583 100644 --- a/nalgebra-sparse/src/convert/serial.rs +++ b/nalgebra-sparse/src/convert/serial.rs @@ -7,7 +7,7 @@ use std::ops::Add; use num_traits::Zero; -use nalgebra::storage::Storage; +use 
nalgebra::storage::RawStorage; use nalgebra::{ClosedAdd, DMatrix, Dim, Matrix, Scalar}; use crate::coo::CooMatrix; @@ -16,10 +16,12 @@ use crate::csc::CscMatrix; use crate::csr::CsrMatrix; /// Converts a dense matrix to [`CooMatrix`]. -pub fn convert_dense_coo(dense: &Matrix) -> CooMatrix +pub fn convert_dense_coo(dense: &Matrix) -> CooMatrix where - T: Scalar + Zero + PartialEq, - S: Storage, + T: Scalar + Zero, + R: Dim, + C: Dim, + S: RawStorage, { let mut coo = CooMatrix::new(dense.nrows(), dense.ncols()); @@ -91,10 +93,10 @@ where /// Converts a dense matrix to a [`CsrMatrix`]. pub fn convert_dense_csr(dense: &Matrix) -> CsrMatrix where - T: Scalar + Zero + PartialEq, + T: Scalar + Zero, R: Dim, C: Dim, - S: Storage, + S: RawStorage, { let mut row_offsets = Vec::with_capacity(dense.nrows() + 1); let mut col_idx = Vec::new(); @@ -168,10 +170,10 @@ where /// Converts a dense matrix to a [`CscMatrix`]. pub fn convert_dense_csc(dense: &Matrix) -> CscMatrix where - T: Scalar + Zero + PartialEq, + T: Scalar + Zero, R: Dim, C: Dim, - S: Storage, + S: RawStorage, { let mut col_offsets = Vec::with_capacity(dense.ncols() + 1); let mut row_idx = Vec::new(); diff --git a/nalgebra-sparse/src/coo.rs b/nalgebra-sparse/src/coo.rs index 679dbdb2..34e5ceec 100644 --- a/nalgebra-sparse/src/coo.rs +++ b/nalgebra-sparse/src/coo.rs @@ -57,7 +57,7 @@ impl CooMatrix { /// Panics if any part of the dense matrix is out of bounds of the sparse matrix /// when inserted at `(r, c)`. #[inline] - pub fn push_matrix>( + pub fn push_matrix>( &mut self, r: usize, c: usize, diff --git a/nalgebra-sparse/src/ops/impl_std_ops.rs b/nalgebra-sparse/src/ops/impl_std_ops.rs index 11d59ded..721023a5 100644 --- a/nalgebra-sparse/src/ops/impl_std_ops.rs +++ b/nalgebra-sparse/src/ops/impl_std_ops.rs @@ -6,8 +6,8 @@ use crate::ops::serial::{ spmm_csc_prealloc, spmm_csr_dense, spmm_csr_pattern, spmm_csr_prealloc, }; use crate::ops::Op; -use nalgebra::allocator::{Allocator, InnerAllocator}; -use nalgebra::base::storage::Storage; +use nalgebra::allocator::Allocator; +use nalgebra::base::storage::RawStorage; use nalgebra::constraint::{DimEq, ShapeConstraint}; use nalgebra::{ ClosedAdd, ClosedDiv, ClosedMul, ClosedSub, DefaultAllocator, Dim, Dynamic, Matrix, OMatrix, @@ -28,7 +28,7 @@ macro_rules! impl_bin_op { // Note: The Neg bound is currently required because we delegate e.g. // Sub to SpAdd with negative coefficients. This is not well-defined for // unsigned data types. - $($scalar_type: $($bounds + )? Scalar + ClosedAdd + ClosedSub + ClosedMul + Zero + One + Neg + PartialEq)? + $($scalar_type: $($bounds + )? Scalar + ClosedAdd + ClosedSub + ClosedMul + Zero + One + Neg)? { type Output = $ret; fn $method(self, $b: $b_type) -> Self::Output { @@ -272,7 +272,7 @@ macro_rules! impl_spmm_cs_dense { ($matrix_type_name:ident, $spmm_fn:ident) => { // Implement ref-ref impl_spmm_cs_dense!(&'a $matrix_type_name, &'a Matrix, $spmm_fn, |lhs, rhs| { - let (_, ncols) = rhs.data.shape(); + let (_, ncols) = rhs.shape_generic(); let nrows = Dynamic::new(lhs.nrows()); let mut result = OMatrix::::zeros_generic(nrows, ncols); $spmm_fn(T::zero(), &mut result, T::one(), Op::NoOp(lhs), Op::NoOp(rhs)); @@ -301,14 +301,14 @@ macro_rules! impl_spmm_cs_dense { T: Scalar + ClosedMul + ClosedAdd + ClosedSub + ClosedDiv + Neg + Zero + One, R: Dim, C: Dim, - S: Storage, + S: RawStorage, DefaultAllocator: Allocator, // TODO: Is it possible to simplify these bounds? 
ShapeConstraint: // Bounds so that we can turn OMatrix into a DMatrixSliceMut - DimEq>::Buffer as Storage>::RStride> + DimEq>::Buffer as RawStorage>::RStride> + DimEq - + DimEq>::Buffer as Storage>::CStride> + + DimEq>::Buffer as RawStorage>::CStride> // Bounds so that we can turn &Matrix into a DMatrixSlice + DimEq + DimEq diff --git a/nalgebra-sparse/src/ops/serial/cs.rs b/nalgebra-sparse/src/ops/serial/cs.rs index 9c799339..db057705 100644 --- a/nalgebra-sparse/src/ops/serial/cs.rs +++ b/nalgebra-sparse/src/ops/serial/cs.rs @@ -74,7 +74,7 @@ pub fn spadd_cs_prealloc( a: Op<&CsMatrix>, ) -> Result<(), OperationError> where - T: Scalar + ClosedAdd + ClosedMul + Zero + One + PartialEq, + T: Scalar + ClosedAdd + ClosedMul + Zero + One, { match a { Op::NoOp(a) => { diff --git a/nalgebra-sparse/src/ops/serial/csc.rs b/nalgebra-sparse/src/ops/serial/csc.rs index 5f6868c1..25e59f26 100644 --- a/nalgebra-sparse/src/ops/serial/csc.rs +++ b/nalgebra-sparse/src/ops/serial/csc.rs @@ -55,7 +55,7 @@ pub fn spadd_csc_prealloc( a: Op<&CscMatrix>, ) -> Result<(), OperationError> where - T: Scalar + ClosedAdd + ClosedMul + Zero + One + PartialEq, + T: Scalar + ClosedAdd + ClosedMul + Zero + One, { assert_compatible_spadd_dims!(c, a); spadd_cs_prealloc(beta, &mut c.cs, alpha, a.map_same_op(|a| &a.cs)) diff --git a/nalgebra-sparse/src/ops/serial/csr.rs b/nalgebra-sparse/src/ops/serial/csr.rs index dc8e937b..fa317bbf 100644 --- a/nalgebra-sparse/src/ops/serial/csr.rs +++ b/nalgebra-sparse/src/ops/serial/csr.rs @@ -50,7 +50,7 @@ pub fn spadd_csr_prealloc( a: Op<&CsrMatrix>, ) -> Result<(), OperationError> where - T: Scalar + ClosedAdd + ClosedMul + Zero + One + PartialEq, + T: Scalar + ClosedAdd + ClosedMul + Zero + One, { assert_compatible_spadd_dims!(c, a); spadd_cs_prealloc(beta, &mut c.cs, alpha, a.map_same_op(|a| &a.cs)) diff --git a/nalgebra-sparse/src/pattern.rs b/nalgebra-sparse/src/pattern.rs index 8bc71075..85f6bc1a 100644 --- a/nalgebra-sparse/src/pattern.rs +++ b/nalgebra-sparse/src/pattern.rs @@ -311,7 +311,7 @@ impl From for SparseFormatError { } impl fmt::Display for SparsityPatternFormatError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { SparsityPatternFormatError::InvalidOffsetArrayLength => { write!(f, "Length of offset array is not equal to (major_dim + 1).") diff --git a/src/base/alias.rs b/src/base/alias.rs index a866935d..68829d9a 100644 --- a/src/base/alias.rs +++ b/src/base/alias.rs @@ -1,9 +1,12 @@ #[cfg(any(feature = "alloc", feature = "std"))] use crate::base::dimension::Dynamic; use crate::base::dimension::{U1, U2, U3, U4, U5, U6}; +use crate::base::storage::Owned; #[cfg(any(feature = "std", feature = "alloc"))] use crate::base::vec_storage::VecStorage; -use crate::base::{ArrayStorage, Const, Matrix, Owned, Unit}; +use crate::base::{ArrayStorage, Const, Matrix, Unit}; +use crate::storage::OwnedUninit; +use std::mem::MaybeUninit; /* * @@ -18,13 +21,16 @@ use crate::base::{ArrayStorage, Const, Matrix, Owned, Unit}; /// **Because this is an alias, not all its methods are listed here. See the [`Matrix`](crate::base::Matrix) type too.** pub type OMatrix = Matrix>; +/// An owned matrix with uninitialized data. +pub type UninitMatrix = Matrix, R, C, OwnedUninit>; + /// An owned matrix column-major matrix with `R` rows and `C` columns. /// /// **Because this is an alias, not all its methods are listed here. 
See the [`Matrix`](crate::base::Matrix) type too.** #[deprecated( note = "use SMatrix for a statically-sized matrix using integer dimensions, or OMatrix for an owned matrix using types as dimensions." )] -pub type MatrixMN = OMatrix; +pub type MatrixMN = Matrix>; /// An owned matrix column-major matrix with `D` columns. /// @@ -277,6 +283,9 @@ pub type OVector = Matrix>; /// A statically sized D-dimensional column vector. pub type SVector = Matrix, U1, ArrayStorage>; // Owned, U1>>; +/// An owned matrix with uninitialized data. +pub type UninitVector = Matrix, D, U1, OwnedUninit>; + /// An owned matrix column-major matrix with `R` rows and `C` columns. /// /// **Because this is an alias, not all its methods are listed here. See the [`Matrix`](crate::base::Matrix) type too.** diff --git a/src/base/allocator.rs b/src/base/allocator.rs index 1f639d3d..4d0c27b7 100644 --- a/src/base/allocator.rs +++ b/src/base/allocator.rs @@ -1,11 +1,14 @@ //! Abstract definition of a matrix data storage allocator. -use std::mem::{ManuallyDrop, MaybeUninit}; +use std::any::Any; use crate::base::constraint::{SameNumberOfColumns, SameNumberOfRows, ShapeConstraint}; use crate::base::dimension::{Dim, U1}; -use crate::base::storage::ContiguousStorageMut; -use crate::base::DefaultAllocator; +use crate::base::{DefaultAllocator, Scalar}; +use crate::storage::{IsContiguous, RawStorageMut}; +use crate::StorageMut; +use std::fmt::Debug; +use std::mem::MaybeUninit; /// A matrix allocator of a memory buffer that may contain `R::to_usize() * C::to_usize()` /// elements of type `T`. @@ -16,12 +19,23 @@ use crate::base::DefaultAllocator; /// /// Every allocator must be both static and dynamic. Though not all implementations may share the /// same `Buffer` type. -/// -/// If you also want to be able to create uninitizalized or manually dropped memory buffers, see -/// [`Allocator`]. -pub trait InnerAllocator: 'static + Sized { +pub trait Allocator: Any + Sized { /// The type of buffer this allocator can instanciate. - type Buffer: ContiguousStorageMut; + type Buffer: StorageMut + IsContiguous + Clone + Debug; + /// The type of buffer with uninitialized components this allocator can instanciate. + type BufferUninit: RawStorageMut, R, C> + IsContiguous; + + /// Allocates a buffer with the given number of rows and columns without initializing its content. + unsafe fn allocate_uninitialized(nrows: R, ncols: C) -> MaybeUninit; + /// Allocates a buffer with the given number of rows and columns without initializing its content. + fn allocate_uninit(nrows: R, ncols: C) -> Self::BufferUninit; + + /// Assumes a data buffer to be initialized. + /// + /// # Safety + /// The user must make sure that every single entry of the buffer has been initialized, + /// or Undefined Behavior will immediately occur. + unsafe fn assume_init(uninit: Self::BufferUninit) -> Self::Buffer; /// Allocates a buffer initialized with the content of the given iterator. fn allocate_from_iterator>( @@ -31,45 +45,15 @@ pub trait InnerAllocator: 'static + Sized { ) -> Self::Buffer; } -/// Same as the [`InnerAllocator`] trait, but also provides methods to build uninitialized buffers, -/// or buffers whose entries must be manually dropped. -pub trait Allocator: - InnerAllocator - + InnerAllocator, R, C> - + InnerAllocator, R, C> -{ - /// Allocates a buffer with the given number of rows and columns without initializing its content. - fn allocate_uninitialized( - nrows: R, - ncols: C, - ) -> , R, C>>::Buffer; - - /// Assumes a data buffer to be initialized. 
This operation should be near zero-cost. - /// - /// # Safety - /// The user must make sure that every single entry of the buffer has been initialized, - /// or Undefined Behavior will immediately occur. - unsafe fn assume_init( - uninit: , R, C>>::Buffer, - ) -> >::Buffer; - - /// Specifies that a given buffer's entries should be manually dropped. - fn manually_drop( - buf: >::Buffer, - ) -> , R, C>>::Buffer; -} - - -/// A matrix reallocator. Changes the size of the memory buffer that initially contains (RFrom × -/// CFrom) elements to a smaller or larger size (RTo, CTo). -pub trait Reallocator: +/// A matrix reallocator. Changes the size of the memory buffer that initially contains (`RFrom` × +/// `CFrom`) elements to a smaller or larger size (`RTo`, `CTo`). +pub trait Reallocator: Allocator + Allocator { /// Reallocates a buffer of shape `(RTo, CTo)`, possibly reusing a previously allocated buffer /// `buf`. Data stored by `buf` are linearly copied to the output: /// /// # Safety - /// **NO! THIS IS STILL UB!** /// * The copy is performed as if both were just arrays (without a matrix structure). /// * If `buf` is larger than the output size, then extra elements of `buf` are truncated. /// * If `buf` is smaller than the output size, then extra elements of the output are left @@ -77,8 +61,8 @@ pub trait Reallocator: unsafe fn reallocate_copy( nrows: RTo, ncols: CTo, - buf: >::Buffer, - ) -> >::Buffer; + buf: >::Buffer, + ) -> >::Buffer; } /// The number of rows of the result of a componentwise operation on two matrices. @@ -89,16 +73,23 @@ pub type SameShapeC = >:: // TODO: Bad name. /// Restricts the given number of rows and columns to be respectively the same. -pub trait SameShapeAllocator: +pub trait SameShapeAllocator: Allocator + Allocator, SameShapeC> where + R1: Dim, + R2: Dim, + C1: Dim, + C2: Dim, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { } -impl SameShapeAllocator - for DefaultAllocator +impl SameShapeAllocator for DefaultAllocator where + R1: Dim, + R2: Dim, + C1: Dim, + C2: Dim, DefaultAllocator: Allocator + Allocator, SameShapeC>, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { @@ -106,15 +97,19 @@ where // XXX: Bad name. /// Restricts the given number of rows to be equal. 
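Aside: the `allocate_uninit`/`assume_init` pair above is the core of the new design: allocate storage whose entries are `MaybeUninit<T>`, write every entry, then reinterpret the buffer as initialized. A minimal sketch at the matrix level, reusing the `new_uninitialized_generic(...).assume_init()` idiom visible in the nalgebra-lapack hunks of this patch (iteration over the `MaybeUninit` entries is assumed to be available on the uninitialized matrix type, and constructor names may still shift as the API settles):

```rust
use nalgebra::{Const, Matrix, OMatrix};
use std::mem::MaybeUninit;

fn ones_2x3() -> OMatrix<f64, Const<2>, Const<3>> {
    // 1. Allocate a 2x3 buffer of MaybeUninit<f64>: no values exist yet.
    let mut m = Matrix::new_uninitialized_generic(Const::<2>, Const::<3>);
    // 2. Initialize every single entry...
    for e in m.iter_mut() {
        *e = MaybeUninit::new(1.0);
    }
    // 3. ...so that assuming the buffer initialized is sound.
    // SAFETY: all six entries were written in the loop above.
    unsafe { m.assume_init() }
}
```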
-pub trait SameShapeVectorAllocator: +pub trait SameShapeVectorAllocator: Allocator + Allocator> + SameShapeAllocator where + R1: Dim, + R2: Dim, ShapeConstraint: SameNumberOfRows, { } -impl SameShapeVectorAllocator for DefaultAllocator +impl SameShapeVectorAllocator for DefaultAllocator where + R1: Dim, + R2: Dim, DefaultAllocator: Allocator + Allocator>, ShapeConstraint: SameNumberOfRows, { diff --git a/src/base/array_storage.rs b/src/base/array_storage.rs index 0b397c33..65a43c2b 100644 --- a/src/base/array_storage.rs +++ b/src/base/array_storage.rs @@ -1,5 +1,4 @@ use std::fmt::{self, Debug, Formatter}; -use std::mem; // use std::hash::{Hash, Hasher}; #[cfg(feature = "abomonation-serialize")] use std::io::{Result as IOResult, Write}; @@ -13,28 +12,43 @@ use serde::ser::SerializeSeq; use serde::{Deserialize, Deserializer, Serialize, Serializer}; #[cfg(feature = "serde-serialize-no-std")] use std::marker::PhantomData; +#[cfg(feature = "serde-serialize-no-std")] +use std::mem; #[cfg(feature = "abomonation-serialize")] use abomonation::Abomonation; -use crate::allocator::InnerAllocator; +use crate::base::allocator::Allocator; use crate::base::default_allocator::DefaultAllocator; use crate::base::dimension::{Const, ToTypenum}; -use crate::base::storage::{ - ContiguousStorage, ContiguousStorageMut, ReshapableStorage, Storage, StorageMut, -}; -use crate::base::Owned; +use crate::base::storage::{IsContiguous, Owned, RawStorage, RawStorageMut, ReshapableStorage}; +use crate::base::Scalar; +use crate::Storage; /* * - * Static Storage. + * Static RawStorage. * */ /// A array-based statically sized matrix data storage. -#[repr(transparent)] +#[repr(C)] #[derive(Copy, Clone, PartialEq, Eq, Hash)] pub struct ArrayStorage(pub [[T; R]; C]); +impl ArrayStorage { + #[inline] + pub fn as_slice(&self) -> &[T] { + // SAFETY: this is OK because ArrayStorage is contiguous. + unsafe { self.as_slice_unchecked() } + } + + #[inline] + pub fn as_mut_slice(&mut self) -> &mut [T] { + // SAFETY: this is OK because ArrayStorage is contiguous. + unsafe { self.as_mut_slice_unchecked() } + } +} + // TODO: remove this once the stdlib implements Default for arrays. 
impl Default for ArrayStorage where @@ -53,10 +67,8 @@ impl Debug for ArrayStorage { } } -unsafe impl Storage, Const> +unsafe impl RawStorage, Const> for ArrayStorage -where - DefaultAllocator: InnerAllocator, Const, Buffer = Self>, { type RStride = Const<1>; type CStride = Const; @@ -81,38 +93,36 @@ where true } - #[inline] - fn into_owned(self) -> Owned, Const> - where - DefaultAllocator: InnerAllocator, Const>, - { - Owned(self) - } - - #[inline] - fn clone_owned(&self) -> Owned, Const> - where - T: Clone, - DefaultAllocator: InnerAllocator, Const>, - { - let it = self.as_slice().iter().cloned(); - Owned(DefaultAllocator::allocate_from_iterator( - self.shape().0, - self.shape().1, - it, - )) - } - #[inline] unsafe fn as_slice_unchecked(&self) -> &[T] { std::slice::from_raw_parts(self.ptr(), R * C) } } -unsafe impl StorageMut, Const> +unsafe impl Storage, Const> for ArrayStorage where - DefaultAllocator: InnerAllocator, Const, Buffer = Self>, + DefaultAllocator: Allocator, Const, Buffer = Self>, +{ + #[inline] + fn into_owned(self) -> Owned, Const> + where + DefaultAllocator: Allocator, Const>, + { + self + } + + #[inline] + fn clone_owned(&self) -> Owned, Const> + where + DefaultAllocator: Allocator, Const>, + { + self.clone() + } +} + +unsafe impl RawStorageMut, Const> + for ArrayStorage { #[inline] fn ptr_mut(&mut self) -> *mut T { @@ -125,23 +135,12 @@ where } } -unsafe impl ContiguousStorage, Const> - for ArrayStorage -where - DefaultAllocator: InnerAllocator, Const, Buffer = Self>, -{ -} - -unsafe impl ContiguousStorageMut, Const> - for ArrayStorage -where - DefaultAllocator: InnerAllocator, Const, Buffer = Self>, -{ -} +unsafe impl IsContiguous for ArrayStorage {} impl ReshapableStorage, Const, Const, Const> for ArrayStorage where + T: Scalar, Const: ToTypenum, Const: ToTypenum, Const: ToTypenum, @@ -159,8 +158,8 @@ where fn reshape_generic(self, _: Const, _: Const) -> Self::Output { unsafe { - let data: [[T; R2]; C2] = mem::transmute_copy(&self.0); - mem::forget(self.0); + let data: [[T; R2]; C2] = std::mem::transmute_copy(&self.0); + std::mem::forget(self.0); ArrayStorage(data) } } @@ -175,7 +174,7 @@ where #[cfg(feature = "serde-serialize-no-std")] impl Serialize for ArrayStorage where - T: Serialize, + T: Scalar + Serialize, { fn serialize(&self, serializer: S) -> Result where @@ -194,7 +193,7 @@ where #[cfg(feature = "serde-serialize-no-std")] impl<'a, T, const R: usize, const C: usize> Deserialize<'a> for ArrayStorage where - T: Deserialize<'a>, + T: Scalar + Deserialize<'a>, { fn deserialize(deserializer: D) -> Result where @@ -211,7 +210,10 @@ struct ArrayStorageVisitor { } #[cfg(feature = "serde-serialize-no-std")] -impl ArrayStorageVisitor { +impl ArrayStorageVisitor +where + T: Scalar, +{ /// Construct a new sequence visitor. 
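Aside: the `as_slice`/`as_mut_slice` helpers added to `ArrayStorage` above expose the backing array in nalgebra's documented column-major order, which is worth spelling out:

```rust
use nalgebra::Matrix2;

fn main() {
    let m = Matrix2::new(1, 2,
                         3, 4);
    // ArrayStorage is contiguous and column-major, so the first column
    // [1, 3] comes before the second column [2, 4].
    assert_eq!(m.data.as_slice(), &[1, 3, 2, 4]);
}
```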
pub fn new() -> Self { ArrayStorageVisitor { @@ -223,7 +225,7 @@ impl ArrayStorageVisitor { #[cfg(feature = "serde-serialize-no-std")] impl<'a, T, const R: usize, const C: usize> Visitor<'a> for ArrayStorageVisitor where - T: Deserialize<'a>, + T: Scalar + Deserialize<'a>, { type Value = ArrayStorage; @@ -255,13 +257,13 @@ where } #[cfg(feature = "bytemuck")] -unsafe impl bytemuck::Zeroable - for ArrayStorage +unsafe impl + bytemuck::Zeroable for ArrayStorage { } #[cfg(feature = "bytemuck")] -unsafe impl bytemuck::Pod +unsafe impl bytemuck::Pod for ArrayStorage { } @@ -269,7 +271,7 @@ unsafe impl bytemuck::P #[cfg(feature = "abomonation-serialize")] impl Abomonation for ArrayStorage where - T: Abomonation, + T: Scalar + Abomonation, { unsafe fn entomb(&self, writer: &mut W) -> IOResult<()> { for element in self.as_slice() { diff --git a/src/base/blas.rs b/src/base/blas.rs index 437ce7a7..c19011fd 100644 --- a/src/base/blas.rs +++ b/src/base/blas.rs @@ -1,21 +1,9 @@ -//! Implements a subset of the Basic Linear Algebra Subprograms (BLAS), a -//! standard and highly optimized set of basic vector and matrix operations. -//! -//! To avoid unsoundness due to mishandling of uninitialized data, we divide our -//! methods into two groups: those that take in a `&mut` to a matrix, and those -//! that return an owned matrix that would otherwise result from setting a -//! parameter to zero in the other methods. - -use crate::{MatrixSliceMut, SimdComplexField, VectorSliceMut}; -#[cfg(feature = "std")] -use matrixmultiply; +use crate::{RawStorage, SimdComplexField}; use num::{One, Zero}; use simba::scalar::{ClosedAdd, ClosedMul}; -#[cfg(feature = "std")] -use std::mem; -use std::mem::MaybeUninit; use crate::base::allocator::Allocator; +use crate::base::blas_uninit::{axcpy_uninit, gemm_uninit, gemv_uninit}; use crate::base::constraint::{ AreMultipliable, DimEq, SameNumberOfColumns, SameNumberOfRows, ShapeConstraint, }; @@ -24,9 +12,10 @@ use crate::base::storage::{Storage, StorageMut}; use crate::base::{ DVectorSlice, DefaultAllocator, Matrix, Scalar, SquareMatrix, Vector, VectorSlice, }; +use crate::core::uninit::Init; /// # Dot/scalar product -impl> Matrix +impl> Matrix where T: Scalar + Zero + ClosedAdd + ClosedMul, { @@ -37,7 +26,7 @@ where conjugate: impl Fn(T) -> T, ) -> T where - SB: Storage, + SB: RawStorage, ShapeConstraint: DimEq + DimEq, { assert!( @@ -205,7 +194,7 @@ where #[must_use] pub fn dot(&self, rhs: &Matrix) -> T where - SB: Storage, + SB: RawStorage, ShapeConstraint: DimEq + DimEq, { self.dotx(rhs, |e| e) @@ -235,7 +224,7 @@ where pub fn dotc(&self, rhs: &Matrix) -> T where T: SimdComplexField, - SB: Storage, + SB: RawStorage, ShapeConstraint: DimEq + DimEq, { self.dotx(rhs, T::simd_conjugate) @@ -262,7 +251,7 @@ where #[must_use] pub fn tr_dot(&self, rhs: &Matrix) -> T where - SB: Storage, + SB: RawStorage, ShapeConstraint: DimEq + DimEq, { let (nrows, ncols) = self.shape(); @@ -293,10 +282,7 @@ where T: Scalar + Zero + ClosedAdd + ClosedMul, S: StorageMut, { - /// Computes `self = a * x * c + b * self`, where `a`, `b`, `c` are scalars, - /// and `x` is a vector of the same size as `self`. - /// - /// For commutative scalars, this is equivalent to an [`axpy`] call. + /// Computes `self = a * x * c + b * self`. /// /// If `b` is zero, `self` is never read from. 
/// @@ -316,34 +302,7 @@ where SB: Storage, ShapeConstraint: DimEq, { - assert_eq!(self.nrows(), x.nrows(), "Axcpy: mismatched vector shapes."); - - let rstride1 = self.strides().0; - let rstride2 = x.strides().0; - - unsafe { - // SAFETY: the conversion to slices is OK because we access the - // elements taking the strides into account. - let y = self.data.as_mut_slice_unchecked(); - let x = x.data.as_slice_unchecked(); - - if !b.is_zero() { - for i in 0..x.len() { - let y = y.get_unchecked_mut(i * rstride1); - *y = a.inlined_clone() - * x.get_unchecked(i * rstride2).inlined_clone() - * c.inlined_clone() - + b.inlined_clone() * y.inlined_clone(); - } - } else { - for i in 0..x.len() { - let y = y.get_unchecked_mut(i * rstride1); - *y = a.inlined_clone() - * x.get_unchecked(i * rstride2).inlined_clone() - * c.inlined_clone(); - } - } - } + unsafe { axcpy_uninit(Init, self, a, x, c, b) }; } /// Computes `self = a * x + b * self`. @@ -399,38 +358,8 @@ where SC: Storage, ShapeConstraint: DimEq + AreMultipliable, { - let dim1 = self.nrows(); - let (nrows2, ncols2) = a.shape(); - let dim3 = x.nrows(); - - assert!( - ncols2 == dim3 && dim1 == nrows2, - "Gemv: dimensions mismatch." - ); - - if ncols2 == 0 { - // NOTE: we can't just always multiply by beta - // because we documented the guaranty that `self` is - // never read if `beta` is zero. - if beta.is_zero() { - self.fill(T::zero()); - } else { - *self *= beta; - } - return; - } - - // TODO: avoid bound checks. - let col2 = a.column(0); - let val = unsafe { x.vget_unchecked(0).inlined_clone() }; - self.axcpy(alpha.inlined_clone(), &col2, val, beta); - - for j in 1..ncols2 { - let col2 = a.column(j); - let val = unsafe { x.vget_unchecked(j).inlined_clone() }; - - self.axcpy(alpha.inlined_clone(), &col2, val, T::one()); - } + // Safety: this is safe because we are passing Status == Init. + unsafe { gemv_uninit(Init, self, alpha, a, x, beta) } } #[inline(always)] @@ -490,25 +419,6 @@ where } } - /// Computes `self = alpha * a * x + beta * self`, where `a` is a **symmetric** matrix, `x` a - /// vector, and `alpha, beta` two scalars. DEPRECATED: use `sygemv` instead. - #[inline] - #[deprecated(note = "This is renamed `sygemv` to match the original BLAS terminology.")] - pub fn gemv_symm( - &mut self, - alpha: T, - a: &SquareMatrix, - x: &Vector, - beta: T, - ) where - T: One, - SB: Storage, - SC: Storage, - ShapeConstraint: DimEq + AreMultipliable, - { - self.sygemv(alpha, a, x, beta) - } - /// Computes `self = alpha * a * x + beta * self`, where `a` is a **symmetric** matrix, `x` a /// vector, and `alpha, beta` two scalars. /// @@ -709,331 +619,6 @@ where } } -impl Vector, D, S> -where - T: Scalar + Zero + ClosedAdd + ClosedMul, - S: StorageMut, D>, -{ - /// Computes `alpha * a * x`, where `a` is a matrix, `x` a vector, and - /// `alpha` is a scalar. - /// - /// `self` must be completely uninitialized, or data leaks will occur. After - /// this method is called, all entries in `self` will be initialized. - #[inline] - pub fn axc( - &mut self, - a: T, - x: &Vector, - c: T, - ) -> VectorSliceMut - where - S2: Storage, - ShapeConstraint: DimEq, - { - let rstride1 = self.strides().0; - let rstride2 = x.strides().0; - - // Safety: see each individual remark. - unsafe { - // We don't mind `x` and `y` not being contiguous, as we'll only - // access the elements we're allowed to. 
(TODO: double check this) - let y = self.data.as_mut_slice_unchecked(); - let x = x.data.as_slice_unchecked(); - - // The indices are within range, and only access elements that belong - // to `x` and `y` themselves. - for i in 0..y.len() { - *y.get_unchecked_mut(i * rstride1) = MaybeUninit::new( - a.inlined_clone() - * x.get_unchecked(i * rstride2).inlined_clone() - * c.inlined_clone(), - ); - } - - // We've initialized all elements. - self.assume_init_mut() - } - } - - /// Computes `alpha * a * x`, where `a` is a matrix, `x` a vector, and - /// `alpha` is a scalar. - /// - /// `self` must be completely uninitialized, or data leaks will occur. After - /// the method is called, `self` will be completely initialized. We return - /// an initialized mutable vector slice to `self` for convenience. - #[inline] - pub fn gemv_z( - &mut self, - alpha: T, - a: &Matrix, - x: &Vector, - ) -> VectorSliceMut - where - T: One, - SB: Storage, - SC: Storage, - ShapeConstraint: DimEq + AreMultipliable, - { - let dim1 = self.nrows(); - let (nrows2, ncols2) = a.shape(); - let dim3 = x.nrows(); - - assert!( - ncols2 == dim3 && dim1 == nrows2, - "Gemv: dimensions mismatch." - ); - - if ncols2 == 0 { - self.fill_fn(|| MaybeUninit::new(T::zero())); - - // Safety: all entries have just been initialized. - unsafe { - return self.assume_init_mut(); - } - } - - // TODO: avoid bound checks. - let col2 = a.column(0); - let val = unsafe { x.vget_unchecked(0).inlined_clone() }; - let mut init = self.axc(alpha.inlined_clone(), &col2, val); - - // Safety: all indices are within range. - unsafe { - for j in 1..ncols2 { - let col2 = a.column(j); - let val = x.vget_unchecked(j).inlined_clone(); - init.axcpy(alpha.inlined_clone(), &col2, val, T::one()); - } - } - - init - } - - #[inline(always)] - fn xxgemv_z( - &mut self, - alpha: T, - a: &SquareMatrix, - x: &Vector, - dot: impl Fn( - &DVectorSlice, - &DVectorSlice, - ) -> T, - ) where - T: One, - SB: Storage, - SC: Storage, - ShapeConstraint: DimEq + AreMultipliable, - { - let dim1 = self.nrows(); - let dim2 = a.nrows(); - let dim3 = x.nrows(); - - assert!( - a.is_square(), - "Symmetric cgemv: the input matrix must be square." - ); - assert!( - dim2 == dim3 && dim1 == dim2, - "Symmetric cgemv: dimensions mismatch." - ); - - if dim2 == 0 { - return; - } - - // TODO: avoid bound checks. - let col2 = a.column(0); - let val = unsafe { x.vget_unchecked(0).inlined_clone() }; - let mut res = self.axc(alpha.inlined_clone(), &col2, val); - - res[0] += alpha.inlined_clone() * dot(&a.slice_range(1.., 0), &x.rows_range(1..)); - - for j in 1..dim2 { - let col2 = a.column(j); - let dot = dot(&col2.rows_range(j..), &x.rows_range(j..)); - - let val; - unsafe { - val = x.vget_unchecked(j).inlined_clone(); - *res.vget_unchecked_mut(j) += alpha.inlined_clone() * dot; - } - res.rows_range_mut(j + 1..).axpy( - alpha.inlined_clone() * val, - &col2.rows_range(j + 1..), - T::one(), - ); - } - } - - /// Computes `self = alpha * a * x`, where `a` is an **hermitian** matrix, `x` a - /// vector, and `alpha, beta` two scalars. - pub fn hegemv_z( - &mut self, - alpha: T, - a: &SquareMatrix, - x: &Vector, - ) where - T: SimdComplexField, - SB: Storage, - SC: Storage, - ShapeConstraint: DimEq + AreMultipliable, - { - self.xxgemv_z(alpha, a, x, |a, b| a.dotc(b)) - } -} - -impl, R1, C1>> Matrix, R1, C1, S> -where - T: Scalar + Zero + One + ClosedAdd + ClosedMul, - // DefaultAllocator: Allocator, -{ - /// Computes `alpha * a * b`, where `a` and `b` are matrices, and `alpha` is - /// a scalar. 
- /// - /// # Examples: - /// - /// ``` - /// # #[macro_use] extern crate approx; - /// # use nalgebra::{Matrix2x3, Matrix3x4, Matrix2x4}; - /// let mut mat1 = Matrix2x4::identity(); - /// let mat2 = Matrix2x3::new(1.0, 2.0, 3.0, - /// 4.0, 5.0, 6.0); - /// let mat3 = Matrix3x4::new(0.1, 0.2, 0.3, 0.4, - /// 0.5, 0.6, 0.7, 0.8, - /// 0.9, 1.0, 1.1, 1.2); - /// let expected = mat2 * mat3 * 10.0 + mat1 * 5.0; - /// - /// mat1.gemm(10.0, &mat2, &mat3, 5.0); - /// assert_relative_eq!(mat1, expected); - /// ``` - #[inline] - pub fn gemm_z( - &mut self, - alpha: T, - a: &Matrix, - b: &Matrix, - ) -> MatrixSliceMut - where - SB: Storage, - SC: Storage, - ShapeConstraint: SameNumberOfRows - + SameNumberOfColumns - + AreMultipliable, - { - let ncols1 = self.ncols(); - - #[cfg(feature = "std")] - { - // We assume large matrices will be Dynamic but small matrices static. - // We could use matrixmultiply for large statically-sized matrices but the performance - // threshold to activate it would be different from SMALL_DIM because our code optimizes - // better for statically-sized matrices. - if R1::is::() - || C1::is::() - || R2::is::() - || C2::is::() - || R3::is::() - || C3::is::() - { - // matrixmultiply can be used only if the std feature is available. - let nrows1 = self.nrows(); - let (nrows2, ncols2) = a.shape(); - let (nrows3, ncols3) = b.shape(); - - // Threshold determined empirically. - const SMALL_DIM: usize = 5; - - if nrows1 > SMALL_DIM - && ncols1 > SMALL_DIM - && nrows2 > SMALL_DIM - && ncols2 > SMALL_DIM - { - assert_eq!( - ncols2, nrows3, - "gemm: dimensions mismatch for multiplication." - ); - assert_eq!( - (nrows1, ncols1), - (nrows2, ncols3), - "gemm: dimensions mismatch for addition." - ); - - // NOTE: this case should never happen because we enter this - // codepath only when ncols2 > SMALL_DIM. Though we keep this - // here just in case if in the future we change the conditions to - // enter this codepath. - if ncols1 == 0 { - self.fill_fn(|| MaybeUninit::new(T::zero())); - - // Safety: there's no (uninitialized) values. - return unsafe { self.assume_init_mut() }; - } - - let (rsa, csa) = a.strides(); - let (rsb, csb) = b.strides(); - let (rsc, csc) = self.strides(); - - if T::is::() { - unsafe { - matrixmultiply::sgemm( - nrows2, - ncols2, - ncols3, - mem::transmute_copy(&alpha), - a.data.ptr() as *const f32, - rsa as isize, - csa as isize, - b.data.ptr() as *const f32, - rsb as isize, - csb as isize, - 0.0, - self.data.ptr_mut() as *mut f32, - rsc as isize, - csc as isize, - ); - } - } else if T::is::() { - unsafe { - matrixmultiply::dgemm( - nrows2, - ncols2, - ncols3, - mem::transmute_copy(&alpha), - a.data.ptr() as *const f64, - rsa as isize, - csa as isize, - b.data.ptr() as *const f64, - rsb as isize, - csb as isize, - 0.0, - self.data.ptr_mut() as *mut f64, - rsc as isize, - csc as isize, - ); - } - } - - // Safety: all entries have been initialized. - unsafe { - return self.assume_init_mut(); - } - } - } - } - - for j1 in 0..ncols1 { - // TODO: avoid bound checks. - let _ = self - .column_mut(j1) - .gemv_z(alpha.inlined_clone(), a, &b.column(j1)); - } - - // Safety: all entries have been initialized. - unsafe { self.assume_init_mut() } - } -} - impl> Matrix where T: Scalar + Zero + ClosedAdd + ClosedMul, @@ -1170,122 +755,9 @@ where + SameNumberOfColumns + AreMultipliable, { - let ncols1 = self.ncols(); - - #[cfg(feature = "std")] - { - // We assume large matrices will be Dynamic but small matrices static. 
- // We could use matrixmultiply for large statically-sized matrices but the performance - // threshold to activate it would be different from SMALL_DIM because our code optimizes - // better for statically-sized matrices. - if R1::is::() - || C1::is::() - || R2::is::() - || C2::is::() - || R3::is::() - || C3::is::() - { - // matrixmultiply can be used only if the std feature is available. - let nrows1 = self.nrows(); - let (nrows2, ncols2) = a.shape(); - let (nrows3, ncols3) = b.shape(); - - // Threshold determined empirically. - const SMALL_DIM: usize = 5; - - if nrows1 > SMALL_DIM - && ncols1 > SMALL_DIM - && nrows2 > SMALL_DIM - && ncols2 > SMALL_DIM - { - assert_eq!( - ncols2, nrows3, - "gemm: dimensions mismatch for multiplication." - ); - assert_eq!( - (nrows1, ncols1), - (nrows2, ncols3), - "gemm: dimensions mismatch for addition." - ); - - // NOTE: this case should never happen because we enter this - // codepath only when ncols2 > SMALL_DIM. Though we keep this - // here just in case if in the future we change the conditions to - // enter this codepath. - if ncols2 == 0 { - // NOTE: we can't just always multiply by beta - // because we documented the guaranty that `self` is - // never read if `beta` is zero. - if beta.is_zero() { - self.fill(T::zero()); - } else { - *self *= beta; - } - return; - } - - if T::is::() { - let (rsa, csa) = a.strides(); - let (rsb, csb) = b.strides(); - let (rsc, csc) = self.strides(); - - unsafe { - matrixmultiply::sgemm( - nrows2, - ncols2, - ncols3, - mem::transmute_copy(&alpha), - a.data.ptr() as *const f32, - rsa as isize, - csa as isize, - b.data.ptr() as *const f32, - rsb as isize, - csb as isize, - mem::transmute_copy(&beta), - self.data.ptr_mut() as *mut f32, - rsc as isize, - csc as isize, - ); - } - return; - } else if T::is::() { - let (rsa, csa) = a.strides(); - let (rsb, csb) = b.strides(); - let (rsc, csc) = self.strides(); - - unsafe { - matrixmultiply::dgemm( - nrows2, - ncols2, - ncols3, - mem::transmute_copy(&alpha), - a.data.ptr() as *const f64, - rsa as isize, - csa as isize, - b.data.ptr() as *const f64, - rsb as isize, - csb as isize, - mem::transmute_copy(&beta), - self.data.ptr_mut() as *mut f64, - rsc as isize, - csc as isize, - ); - } - return; - } - } - } - } - - for j1 in 0..ncols1 { - // TODO: avoid bound checks. - self.column_mut(j1).gemv( - alpha.inlined_clone(), - a, - &b.column(j1), - beta.inlined_clone(), - ); - } + // SAFETY: this is valid because our matrices are initialized and + // we are using status = Init. + unsafe { gemm_uninit(Init, self, alpha, a, b, beta) } } /// Computes `self = alpha * a.transpose() * b + beta * self`, where `a, b, self` are matrices. @@ -1579,33 +1051,78 @@ where /// let mid = DMatrix::from_row_slice(3, 3, &[0.1, 0.2, 0.3, /// 0.5, 0.6, 0.7, /// 0.9, 1.0, 1.1]); - /// + /// // The random shows that values on the workspace do not + /// // matter as they will be overwritten. 
+ /// let mut workspace = DVector::new_random(2); /// let expected = &lhs * &mid * lhs.transpose() * 10.0 + &mat * 5.0; /// + /// mat.quadform_tr_with_workspace(&mut workspace, 10.0, &lhs, &mid, 5.0); + /// assert_relative_eq!(mat, expected); + pub fn quadform_tr_with_workspace( + &mut self, + work: &mut Vector, + alpha: T, + lhs: &Matrix, + mid: &SquareMatrix, + beta: T, + ) where + D2: Dim, + R3: Dim, + C3: Dim, + D4: Dim, + S2: StorageMut, + S3: Storage, + S4: Storage, + ShapeConstraint: DimEq + DimEq + DimEq + DimEq, + { + work.gemv(T::one(), lhs, &mid.column(0), T::zero()); + self.ger(alpha.inlined_clone(), work, &lhs.column(0), beta); + + for j in 1..mid.ncols() { + work.gemv(T::one(), lhs, &mid.column(j), T::zero()); + self.ger(alpha.inlined_clone(), work, &lhs.column(j), T::one()); + } + } + + /// Computes the quadratic form `self = alpha * lhs * mid * lhs.transpose() + beta * self`. + /// + /// This allocates a workspace vector of dimension D1 for intermediate results. + /// If `D1` is a type-level integer, then the allocation is performed on the stack. + /// Use `.quadform_tr_with_workspace(...)` instead to avoid allocations. + /// + /// # Examples: + /// + /// ``` + /// # #[macro_use] extern crate approx; + /// # use nalgebra::{Matrix2, Matrix3, Matrix2x3, Vector2}; + /// let mut mat = Matrix2::identity(); + /// let lhs = Matrix2x3::new(1.0, 2.0, 3.0, + /// 4.0, 5.0, 6.0); + /// let mid = Matrix3::new(0.1, 0.2, 0.3, + /// 0.5, 0.6, 0.7, + /// 0.9, 1.0, 1.1); + /// let expected = lhs * mid * lhs.transpose() * 10.0 + mat * 5.0; + /// /// mat.quadform_tr(10.0, &lhs, &mid, 5.0); /// assert_relative_eq!(mat, expected); - pub fn quadform_tr( + pub fn quadform_tr( &mut self, alpha: T, lhs: &Matrix, mid: &SquareMatrix, beta: T, ) where + R3: Dim, + C3: Dim, + D4: Dim, S3: Storage, S4: Storage, - ShapeConstraint: DimEq + DimEq, - DefaultAllocator: Allocator, + ShapeConstraint: DimEq + DimEq + DimEq, + DefaultAllocator: Allocator, { - let mut work = - Matrix::new_uninitialized_generic(R3::from_usize(self.shape().0), Const::<1>); - let mut work = work.gemv_z(T::one(), lhs, &mid.column(0)); - - self.ger(alpha.inlined_clone(), &work, &lhs.column(0), beta); - - for j in 1..mid.ncols() { - work.gemv(T::one(), lhs, &mid.column(j), T::zero()); - self.ger(alpha.inlined_clone(), &work, &lhs.column(j), T::one()); - } + // TODO: would it be useful to avoid the zero-initialization of the workspace data? + let mut work = Matrix::zeros_generic(self.shape_generic().0, Const::<1>); + self.quadform_tr_with_workspace(&mut work, alpha, lhs, mid, beta) } /// Computes the quadratic form `self = alpha * rhs.transpose() * mid * rhs + beta * self`. @@ -1626,34 +1143,79 @@ where /// let mid = DMatrix::from_row_slice(3, 3, &[0.1, 0.2, 0.3, /// 0.5, 0.6, 0.7, /// 0.9, 1.0, 1.1]); - /// + /// // The random shows that values on the workspace do not + /// // matter as they will be overwritten. 
+    /// let mut workspace = DVector::new_random(3);
     /// let expected = rhs.transpose() * &mid * &rhs * 10.0 + &mat * 5.0;
     ///
-    /// mat.quadform(10.0, &mid, &rhs, 5.0);
+    /// mat.quadform_with_workspace(&mut workspace, 10.0, &mid, &rhs, 5.0);
     /// assert_relative_eq!(mat, expected);
     /// ```
-    pub fn quadform(
+    pub fn quadform_with_workspace(
         &mut self,
+        work: &mut Vector,
         alpha: T,
         mid: &SquareMatrix,
         rhs: &Matrix,
         beta: T,
     ) where
+        D2: Dim,
+        D3: Dim,
+        R4: Dim,
+        C4: Dim,
+        S2: StorageMut,
         S3: Storage,
         S4: Storage,
-        ShapeConstraint: DimEq + DimEq + DimEq,
-        DefaultAllocator: Allocator,
+        ShapeConstraint:
+            DimEq + DimEq + DimEq + AreMultipliable,
     {
-        // TODO: figure out why type inference isn't doing its job.
-        let mut work = Matrix::new_uninitialized_generic(D3::from_usize(mid.shape().0), Const::<1>);
-        let mut work = work.gemv_z::(T::one(), mid, &rhs.column(0));
-
+        work.gemv(T::one(), mid, &rhs.column(0), T::zero());
         self.column_mut(0)
-            .gemv_tr(alpha.inlined_clone(), rhs, &work, beta.inlined_clone());
+            .gemv_tr(alpha.inlined_clone(), rhs, work, beta.inlined_clone());

         for j in 1..rhs.ncols() {
-            work.gemv::(T::one(), mid, &rhs.column(j), T::zero());
+            work.gemv(T::one(), mid, &rhs.column(j), T::zero());
             self.column_mut(j)
-                .gemv_tr(alpha.inlined_clone(), rhs, &work, beta.inlined_clone());
+                .gemv_tr(alpha.inlined_clone(), rhs, work, beta.inlined_clone());
         }
     }
+
+    /// Computes the quadratic form `self = alpha * rhs.transpose() * mid * rhs + beta * self`.
+    ///
+    /// This allocates a workspace vector of dimension D2 for intermediate results.
+    /// If `D2` is a type-level integer, then the allocation is performed on the stack.
+    /// Use `.quadform_with_workspace(...)` instead to avoid allocations.
+    ///
+    /// ```
+    /// # #[macro_use] extern crate approx;
+    /// # use nalgebra::{Matrix2, Matrix3x2, Matrix3};
+    /// let mut mat = Matrix2::identity();
+    /// let rhs = Matrix3x2::new(1.0, 2.0,
+    ///                          3.0, 4.0,
+    ///                          5.0, 6.0);
+    /// let mid = Matrix3::new(0.1, 0.2, 0.3,
+    ///                        0.5, 0.6, 0.7,
+    ///                        0.9, 1.0, 1.1);
+    /// let expected = rhs.transpose() * mid * rhs * 10.0 + mat * 5.0;
+    ///
+    /// mat.quadform(10.0, &mid, &rhs, 5.0);
+    /// assert_relative_eq!(mat, expected);
+    /// ```
+    pub fn quadform(
+        &mut self,
+        alpha: T,
+        mid: &SquareMatrix,
+        rhs: &Matrix,
+        beta: T,
+    ) where
+        D2: Dim,
+        R3: Dim,
+        C3: Dim,
+        S2: Storage,
+        S3: Storage,
+        ShapeConstraint: DimEq + DimEq + AreMultipliable,
+        DefaultAllocator: Allocator,
+    {
+        // TODO: would it be useful to avoid the zero-initialization of the workspace data?
+        let mut work = Vector::zeros_generic(mid.shape_generic().0, Const::<1>);
+        self.quadform_with_workspace(&mut work, alpha, mid, rhs, beta)
+    }
 }
diff --git a/src/base/blas_uninit.rs b/src/base/blas_uninit.rs
new file mode 100644
index 00000000..2b3c5fc3
--- /dev/null
+++ b/src/base/blas_uninit.rs
@@ -0,0 +1,359 @@
+/*
+ * This file implements some BLAS operations in such a way that they work
+ * even if the first argument (the output parameter) is an uninitialized matrix.
+ *
+ * Because doing this makes the code harder to read, we only implemented the operations that we
+ * know would benefit from this performance-wise, namely, GEMM (which we use for our matrix
+ * multiplication code). If we identify other operations like that in the future, we could add
+ * them here.
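+ *
+ * A sketch of the intended call pattern, using this module's own API as
+ * introduced below (illustrative only; `Uninit` is assumed to be the
+ * uninitialized counterpart of the `Init` status used by the initialized
+ * wrappers in blas.rs):
+ *
+ *     let mut res = Matrix::uninit(nrows, ncols);
+ *     // SAFETY: `beta` is zero, so the uninitialized `res` is never read.
+ *     unsafe { gemm_uninit(Uninit, &mut res, alpha, &a, &b, T::zero()) };
+ *     let res = unsafe { res.assume_init() };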
+ */ + +#[cfg(feature = "std")] +use matrixmultiply; +use num::{One, Zero}; +use simba::scalar::{ClosedAdd, ClosedMul}; +#[cfg(feature = "std")] +use std::mem; + +use crate::base::constraint::{ + AreMultipliable, DimEq, SameNumberOfColumns, SameNumberOfRows, ShapeConstraint, +}; +use crate::base::dimension::{Dim, Dynamic, U1}; +use crate::base::storage::{RawStorage, RawStorageMut}; +use crate::base::uninit::{InitStatus, Initialized}; +use crate::base::{Matrix, Scalar, Vector}; + +// # Safety +// The content of `y` must only contain values for which +// `Status::assume_init_mut` is sound. +#[allow(clippy::too_many_arguments)] +unsafe fn array_axcpy( + _: Status, + y: &mut [Status::Value], + a: T, + x: &[T], + c: T, + beta: T, + stride1: usize, + stride2: usize, + len: usize, +) where + Status: InitStatus, + T: Scalar + Zero + ClosedAdd + ClosedMul, +{ + for i in 0..len { + let y = Status::assume_init_mut(y.get_unchecked_mut(i * stride1)); + *y = a.inlined_clone() * x.get_unchecked(i * stride2).inlined_clone() * c.inlined_clone() + + beta.inlined_clone() * y.inlined_clone(); + } +} + +fn array_axc( + _: Status, + y: &mut [Status::Value], + a: T, + x: &[T], + c: T, + stride1: usize, + stride2: usize, + len: usize, +) where + Status: InitStatus, + T: Scalar + Zero + ClosedAdd + ClosedMul, +{ + for i in 0..len { + unsafe { + Status::init( + y.get_unchecked_mut(i * stride1), + a.inlined_clone() + * x.get_unchecked(i * stride2).inlined_clone() + * c.inlined_clone(), + ); + } + } +} + +/// Computes `self = a * x * c + b * self`. +/// +/// If `b` is zero, `self` is never read from. +/// +/// # Examples: +/// +/// ``` +/// # use nalgebra::Vector3; +/// let mut vec1 = Vector3::new(1.0, 2.0, 3.0); +/// let vec2 = Vector3::new(0.1, 0.2, 0.3); +/// vec1.axcpy(5.0, &vec2, 2.0, 5.0); +/// assert_eq!(vec1, Vector3::new(6.0, 12.0, 18.0)); +/// ``` +#[inline] +#[allow(clippy::many_single_char_names)] +pub unsafe fn axcpy_uninit( + status: Status, + y: &mut Vector, + a: T, + x: &Vector, + c: T, + b: T, +) where + T: Scalar + Zero + ClosedAdd + ClosedMul, + SA: RawStorageMut, + SB: RawStorage, + ShapeConstraint: DimEq, + Status: InitStatus, +{ + assert_eq!(y.nrows(), x.nrows(), "Axcpy: mismatched vector shapes."); + + let rstride1 = y.strides().0; + let rstride2 = x.strides().0; + + // SAFETY: the conversion to slices is OK because we access the + // elements taking the strides into account. + let y = y.data.as_mut_slice_unchecked(); + let x = x.data.as_slice_unchecked(); + + if !b.is_zero() { + array_axcpy(status, y, a, x, c, b, rstride1, rstride2, x.len()); + } else { + array_axc(status, y, a, x, c, rstride1, rstride2, x.len()); + } +} + +/// Computes `self = alpha * a * x + beta * self`, where `a` is a matrix, `x` a vector, and +/// `alpha, beta` two scalars. +/// +/// If `beta` is zero, `self` is never read. 
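+///
+/// # Safety
+/// `Status` makes this function generic over initialized and uninitialized
+/// output: if `Status` is the uninitialized status, `beta` must be zero,
+/// otherwise the uninitialized output would be read before being written
+/// (see the `SAFETY` comments in the body below).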
+/// +/// # Examples: +/// +/// ``` +/// # use nalgebra::{Matrix2, Vector2}; +/// let mut vec1 = Vector2::new(1.0, 2.0); +/// let vec2 = Vector2::new(0.1, 0.2); +/// let mat = Matrix2::new(1.0, 2.0, +/// 3.0, 4.0); +/// vec1.gemv(10.0, &mat, &vec2, 5.0); +/// assert_eq!(vec1, Vector2::new(10.0, 21.0)); +/// ``` +#[inline] +pub unsafe fn gemv_uninit( + status: Status, + y: &mut Vector, + alpha: T, + a: &Matrix, + x: &Vector, + beta: T, +) where + Status: InitStatus, + T: Scalar + Zero + One + ClosedAdd + ClosedMul, + SA: RawStorageMut, + SB: RawStorage, + SC: RawStorage, + ShapeConstraint: DimEq + AreMultipliable, +{ + let dim1 = y.nrows(); + let (nrows2, ncols2) = a.shape(); + let dim3 = x.nrows(); + + assert!( + ncols2 == dim3 && dim1 == nrows2, + "Gemv: dimensions mismatch." + ); + + if ncols2 == 0 { + if beta.is_zero() { + y.apply(|e| Status::init(e, T::zero())); + } else { + // SAFETY: this is UB if y is uninitialized. + y.apply(|e| *Status::assume_init_mut(e) *= beta.inlined_clone()); + } + return; + } + + // TODO: avoid bound checks. + let col2 = a.column(0); + let val = x.vget_unchecked(0).inlined_clone(); + + // SAFETY: this is the call that makes this method unsafe: it is UB if Status = Uninit and beta != 0. + axcpy_uninit(status, y, alpha.inlined_clone(), &col2, val, beta); + + for j in 1..ncols2 { + let col2 = a.column(j); + let val = x.vget_unchecked(j).inlined_clone(); + + // SAFETY: because y was initialized above, we can use the initialized status. + axcpy_uninit( + Initialized(status), + y, + alpha.inlined_clone(), + &col2, + val, + T::one(), + ); + } +} + +/// Computes `self = alpha * a * b + beta * self`, where `a, b, self` are matrices. +/// `alpha` and `beta` are scalar. +/// +/// If `beta` is zero, `self` is never read. +/// +/// # Examples: +/// +/// ``` +/// # #[macro_use] extern crate approx; +/// # use nalgebra::{Matrix2x3, Matrix3x4, Matrix2x4}; +/// let mut mat1 = Matrix2x4::identity(); +/// let mat2 = Matrix2x3::new(1.0, 2.0, 3.0, +/// 4.0, 5.0, 6.0); +/// let mat3 = Matrix3x4::new(0.1, 0.2, 0.3, 0.4, +/// 0.5, 0.6, 0.7, 0.8, +/// 0.9, 1.0, 1.1, 1.2); +/// let expected = mat2 * mat3 * 10.0 + mat1 * 5.0; +/// +/// mat1.gemm(10.0, &mat2, &mat3, 5.0); +/// assert_relative_eq!(mat1, expected); +/// ``` +#[inline] +pub unsafe fn gemm_uninit< + Status, + T, + R1: Dim, + C1: Dim, + R2: Dim, + C2: Dim, + R3: Dim, + C3: Dim, + SA, + SB, + SC, +>( + status: Status, + y: &mut Matrix, + alpha: T, + a: &Matrix, + b: &Matrix, + beta: T, +) where + Status: InitStatus, + T: Scalar + Zero + One + ClosedAdd + ClosedMul, + SA: RawStorageMut, + SB: RawStorage, + SC: RawStorage, + ShapeConstraint: + SameNumberOfRows + SameNumberOfColumns + AreMultipliable, +{ + let ncols1 = y.ncols(); + + #[cfg(feature = "std")] + { + // We assume large matrices will be Dynamic but small matrices static. + // We could use matrixmultiply for large statically-sized matrices but the performance + // threshold to activate it would be different from SMALL_DIM because our code optimizes + // better for statically-sized matrices. + if R1::is::() + || C1::is::() + || R2::is::() + || C2::is::() + || R3::is::() + || C3::is::() + { + // matrixmultiply can be used only if the std feature is available. + let nrows1 = y.nrows(); + let (nrows2, ncols2) = a.shape(); + let (nrows3, ncols3) = b.shape(); + + // Threshold determined empirically. 
+ const SMALL_DIM: usize = 5; + + if nrows1 > SMALL_DIM && ncols1 > SMALL_DIM && nrows2 > SMALL_DIM && ncols2 > SMALL_DIM + { + assert_eq!( + ncols2, nrows3, + "gemm: dimensions mismatch for multiplication." + ); + assert_eq!( + (nrows1, ncols1), + (nrows2, ncols3), + "gemm: dimensions mismatch for addition." + ); + + // NOTE: this case should never happen because we enter this + // codepath only when ncols2 > SMALL_DIM. Though we keep this + // here just in case if in the future we change the conditions to + // enter this codepath. + if ncols2 == 0 { + // NOTE: we can't just always multiply by beta + // because we documented the guaranty that `self` is + // never read if `beta` is zero. + if beta.is_zero() { + y.apply(|e| Status::init(e, T::zero())); + } else { + // SAFETY: this is UB if Status = Uninit + y.apply(|e| *Status::assume_init_mut(e) *= beta.inlined_clone()); + } + return; + } + + if T::is::() { + let (rsa, csa) = a.strides(); + let (rsb, csb) = b.strides(); + let (rsc, csc) = y.strides(); + + matrixmultiply::sgemm( + nrows2, + ncols2, + ncols3, + mem::transmute_copy(&alpha), + a.data.ptr() as *const f32, + rsa as isize, + csa as isize, + b.data.ptr() as *const f32, + rsb as isize, + csb as isize, + mem::transmute_copy(&beta), + y.data.ptr_mut() as *mut f32, + rsc as isize, + csc as isize, + ); + return; + } else if T::is::() { + let (rsa, csa) = a.strides(); + let (rsb, csb) = b.strides(); + let (rsc, csc) = y.strides(); + + matrixmultiply::dgemm( + nrows2, + ncols2, + ncols3, + mem::transmute_copy(&alpha), + a.data.ptr() as *const f64, + rsa as isize, + csa as isize, + b.data.ptr() as *const f64, + rsb as isize, + csb as isize, + mem::transmute_copy(&beta), + y.data.ptr_mut() as *mut f64, + rsc as isize, + csc as isize, + ); + return; + } + } + } + } + + for j1 in 0..ncols1 { + // TODO: avoid bound checks. + // SAFETY: this is UB if Status = Uninit && beta != 0 + gemv_uninit( + status, + &mut y.column_mut(j1), + alpha.inlined_clone(), + a, + &b.column(j1), + beta.inlined_clone(), + ); + } +} diff --git a/src/base/construction.rs b/src/base/construction.rs index 2a7a80da..ae129f0d 100644 --- a/src/base/construction.rs +++ b/src/base/construction.rs @@ -1,6 +1,8 @@ #[cfg(all(feature = "alloc", not(feature = "std")))] use alloc::vec::Vec; +#[cfg(feature = "arbitrary")] +use crate::base::storage::Owned; #[cfg(feature = "arbitrary")] use quickcheck::{Arbitrary, Gen}; @@ -11,17 +13,49 @@ use rand::{ Rng, }; -use std::{iter, mem::MaybeUninit}; +use std::iter; use typenum::{self, Cmp, Greater}; use simba::scalar::{ClosedAdd, ClosedMul}; -use crate::base::allocator::{Allocator, InnerAllocator}; +use crate::base::allocator::Allocator; use crate::base::dimension::{Dim, DimName, Dynamic, ToTypenum}; -use crate::base::storage::Storage; +use crate::base::storage::RawStorage; use crate::base::{ ArrayStorage, Const, DefaultAllocator, Matrix, OMatrix, OVector, Scalar, Unit, Vector, }; +use crate::UninitMatrix; +use std::mem::MaybeUninit; + +/// When "no_unsound_assume_init" is enabled, expands to `unimplemented!()` instead of `new_uninitialized_generic().assume_init()`. +/// Intended as a placeholder, each callsite should be refactored to use uninitialized memory soundly +#[macro_export] +macro_rules! 
unimplemented_or_uninitialized_generic { + ($nrows:expr, $ncols:expr) => {{ + #[cfg(feature="no_unsound_assume_init")] { + // Some of the call sites need the number of rows and columns from this to infer a type, so + // uninitialized memory is used to infer the type, as `T: Zero` isn't available at all callsites. + // This may technically still be UB even though the assume_init is dead code, but all callsites should be fixed before #556 is closed. + let typeinference_helper = crate::base::Matrix::new_uninitialized_generic($nrows, $ncols); + unimplemented!(); + typeinference_helper.assume_init() + } + #[cfg(not(feature="no_unsound_assume_init"))] { crate::base::Matrix::new_uninitialized_generic($nrows, $ncols).assume_init() } + }} +} + +impl UninitMatrix +where + DefaultAllocator: Allocator, +{ + pub fn uninit(nrows: R, ncols: C) -> Self { + // SAFETY: this is OK because the dimension automatically match the storage + // because we are building an owned storage. + unsafe { + Self::from_data_statically_unchecked(DefaultAllocator::allocate_uninit(nrows, ncols)) + } + } +} /// # Generic constructors /// This set of matrix and vector construction functions are all generic @@ -29,16 +63,23 @@ use crate::base::{ /// the dimension as inputs. /// /// These functions should only be used when working on dimension-generic code. -impl OMatrix +impl OMatrix where DefaultAllocator: Allocator, { + /// Creates a new uninitialized matrix. + /// + /// # Safety + /// If the matrix has a compile-time dimension, this panics + /// if `nrows != R::to_usize()` or `ncols != C::to_usize()`. + #[inline] + pub unsafe fn new_uninitialized_generic(nrows: R, ncols: C) -> MaybeUninit { + Self::from_uninitialized_data(DefaultAllocator::allocate_uninitialized(nrows, ncols)) + } + /// Creates a matrix with all its elements set to `elem`. #[inline] - pub fn from_element_generic(nrows: R, ncols: C, elem: T) -> Self - where - T: Clone, - { + pub fn from_element_generic(nrows: R, ncols: C, elem: T) -> Self { let len = nrows.value() * ncols.value(); Self::from_iterator_generic(nrows, ncols, iter::repeat(elem).take(len)) } @@ -47,10 +88,7 @@ where /// /// Same as `from_element_generic`. #[inline] - pub fn repeat_generic(nrows: R, ncols: C, elem: T) -> Self - where - T: Clone, - { + pub fn repeat_generic(nrows: R, ncols: C, elem: T) -> Self { let len = nrows.value() * ncols.value(); Self::from_iterator_generic(nrows, ncols, iter::repeat(elem).take(len)) } @@ -59,7 +97,7 @@ where #[inline] pub fn zeros_generic(nrows: R, ncols: C) -> Self where - T: Zero + Clone, + T: Zero, { Self::from_element_generic(nrows, ncols, T::zero()) } @@ -79,37 +117,32 @@ where /// The order of elements in the slice must follow the usual mathematic writing, i.e., /// row-by-row. #[inline] - pub fn from_row_slice_generic(nrows: R, ncols: C, slice: &[T]) -> Self - where - T: Clone, - { + pub fn from_row_slice_generic(nrows: R, ncols: C, slice: &[T]) -> Self { assert!( slice.len() == nrows.value() * ncols.value(), "Matrix init. error: the slice did not contain the right number of elements." 
); - let mut res = Self::new_uninitialized_generic(nrows, ncols); + let mut res = Matrix::uninit(nrows, ncols); let mut iter = slice.iter(); - for i in 0..nrows.value() { - for j in 0..ncols.value() { - unsafe { - *res.get_unchecked_mut((i, j)) = MaybeUninit::new(iter.next().unwrap().clone()); + unsafe { + for i in 0..nrows.value() { + for j in 0..ncols.value() { + *res.get_unchecked_mut((i, j)) = + MaybeUninit::new(iter.next().unwrap().inlined_clone()) } } - } - // Safety: all entries have been initialized. - unsafe { res.assume_init() } + // SAFETY: the result has been fully initialized above. + res.assume_init() + } } /// Creates a matrix with its elements filled with the components provided by a slice. The /// components must have the same layout as the matrix data storage (i.e. column-major). #[inline] - pub fn from_column_slice_generic(nrows: R, ncols: C, slice: &[T]) -> Self - where - T: Clone, - { + pub fn from_column_slice_generic(nrows: R, ncols: C, slice: &[T]) -> Self { Self::from_iterator_generic(nrows, ncols, slice.iter().cloned()) } @@ -120,18 +153,18 @@ where where F: FnMut(usize, usize) -> T, { - let mut res = Self::new_uninitialized_generic(nrows, ncols); + let mut res = Matrix::uninit(nrows, ncols); - for j in 0..ncols.value() { - for i in 0..nrows.value() { - unsafe { + unsafe { + for j in 0..ncols.value() { + for i in 0..nrows.value() { *res.get_unchecked_mut((i, j)) = MaybeUninit::new(f(i, j)); } } - } - // Safety: all entries have been initialized. - unsafe { res.assume_init() } + // SAFETY: the result has been fully initialized above. + res.assume_init() + } } /// Creates a new identity matrix. @@ -141,7 +174,7 @@ where #[inline] pub fn identity_generic(nrows: R, ncols: C) -> Self where - T: Zero + One + Scalar, + T: Zero + One, { Self::from_diagonal_element_generic(nrows, ncols, T::one()) } @@ -153,7 +186,7 @@ where #[inline] pub fn from_diagonal_element_generic(nrows: R, ncols: C, elt: T) -> Self where - T: Zero + One + Scalar, + T: Zero + One, { let mut res = Self::zeros_generic(nrows, ncols); @@ -171,7 +204,7 @@ where #[inline] pub fn from_partial_diagonal_generic(nrows: R, ncols: C, elts: &[T]) -> Self where - T: Zero + Clone, + T: Zero, { let mut res = Self::zeros_generic(nrows, ncols); assert!( @@ -180,7 +213,7 @@ where ); for (i, elt) in elts.iter().enumerate() { - unsafe { *res.get_unchecked_mut((i, i)) = elt.clone() } + unsafe { *res.get_unchecked_mut((i, i)) = elt.inlined_clone() } } res @@ -205,8 +238,7 @@ where #[inline] pub fn from_rows(rows: &[Matrix, C, SB>]) -> Self where - T: Clone, - SB: Storage, C>, + SB: RawStorage, C>, { assert!(!rows.is_empty(), "At least one row must be given."); let nrows = R::try_to_usize().unwrap_or_else(|| rows.len()); @@ -225,7 +257,7 @@ where // TODO: optimize that. Self::from_fn_generic(R::from_usize(nrows), C::from_usize(ncols), |i, j| { - rows[i][(0, j)].clone() + rows[i][(0, j)].inlined_clone() }) } @@ -248,8 +280,7 @@ where #[inline] pub fn from_columns(columns: &[Vector]) -> Self where - T: Clone, - SB: Storage, + SB: RawStorage, { assert!(!columns.is_empty(), "At least one column must be given."); let ncols = C::try_to_usize().unwrap_or_else(|| columns.len()); @@ -268,7 +299,7 @@ where // TODO: optimize that. 
Self::from_fn_generic(R::from_usize(nrows), C::from_usize(ncols), |i, j| { - columns[j][i].clone() + columns[j][i].inlined_clone() }) } @@ -321,6 +352,7 @@ where impl OMatrix where + T: Scalar, DefaultAllocator: Allocator, { /// Creates a square matrix with its diagonal set to `diag` and all other entries set to 0. @@ -342,11 +374,11 @@ where /// dm[(2, 0)] == 0.0 && dm[(2, 1)] == 0.0 && dm[(2, 2)] == 3.0); /// ``` #[inline] - pub fn from_diagonal>(diag: &Vector) -> Self + pub fn from_diagonal>(diag: &Vector) -> Self where - T: Zero + Scalar, + T: Zero, { - let (dim, _) = diag.data.shape(); + let (dim, _) = diag.shape_generic(); let mut res = Self::zeros_generic(dim, dim); for i in 0..diag.len() { @@ -366,6 +398,12 @@ where */ macro_rules! impl_constructors( ($($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => { + /// Creates a new uninitialized matrix or vector. + #[inline] + pub unsafe fn new_uninitialized($($args: usize),*) -> MaybeUninit { + Self::new_uninitialized_generic($($gargs),*) + } + /// Creates a matrix or vector with all its elements set to `elem`. /// /// # Example @@ -387,10 +425,7 @@ macro_rules! impl_constructors( /// dm[(1, 0)] == 2.0 && dm[(1, 1)] == 2.0 && dm[(1, 2)] == 2.0); /// ``` #[inline] - pub fn from_element($($args: usize,)* elem: T) -> Self - where - T: Clone - { + pub fn from_element($($args: usize,)* elem: T) -> Self { Self::from_element_generic($($gargs, )* elem) } @@ -417,10 +452,7 @@ macro_rules! impl_constructors( /// dm[(1, 0)] == 2.0 && dm[(1, 1)] == 2.0 && dm[(1, 2)] == 2.0); /// ``` #[inline] - pub fn repeat($($args: usize,)* elem: T) -> Self - where - T: Clone - { + pub fn repeat($($args: usize,)* elem: T) -> Self { Self::repeat_generic($($gargs, )* elem) } @@ -446,9 +478,7 @@ macro_rules! impl_constructors( /// ``` #[inline] pub fn zeros($($args: usize),*) -> Self - where - T: Zero + Clone - { + where T: Zero { Self::zeros_generic($($gargs),*) } @@ -504,7 +534,8 @@ macro_rules! impl_constructors( /// dm[(1, 0)] == 3 && dm[(1, 1)] == 4 && dm[(1, 2)] == 5); /// ``` #[inline] - pub fn from_fn T>($($args: usize,)* f: F) -> Self { + pub fn from_fn($($args: usize,)* f: F) -> Self + where F: FnMut(usize, usize) -> T { Self::from_fn_generic($($gargs, )* f) } @@ -528,9 +559,7 @@ macro_rules! impl_constructors( /// ``` #[inline] pub fn identity($($args: usize,)*) -> Self - where - T: Zero + One + Scalar - { + where T: Zero + One { Self::identity_generic($($gargs),* ) } @@ -553,9 +582,7 @@ macro_rules! impl_constructors( /// ``` #[inline] pub fn from_diagonal_element($($args: usize,)* elt: T) -> Self - where - T: Zero + One + Scalar - { + where T: Zero + One { Self::from_diagonal_element_generic($($gargs, )* elt) } @@ -582,9 +609,7 @@ macro_rules! impl_constructors( /// ``` #[inline] pub fn from_partial_diagonal($($args: usize,)* elts: &[T]) -> Self - where - T: Zero + Scalar - { + where T: Zero { Self::from_partial_diagonal_generic($($gargs, )* elts) } @@ -603,16 +628,14 @@ macro_rules! impl_constructors( #[inline] #[cfg(feature = "rand")] pub fn new_random($($args: usize),*) -> Self - where - Standard: Distribution - { + where Standard: Distribution { Self::new_random_generic($($gargs),*) } } ); /// # Constructors of statically-sized vectors or statically-sized matrices -impl OMatrix +impl OMatrix where DefaultAllocator: Allocator, { @@ -623,19 +646,8 @@ where ); // Arguments for non-generic constructors. 
} -impl OMatrix -where - DefaultAllocator: Allocator, -{ - /// Creates a new uninitialized matrix or vector. - #[inline] - pub fn new_uninitialized() -> OMatrix, R, C> { - Self::new_uninitialized_generic(R::name(), C::name()) - } -} - /// # Constructors of matrices with a dynamic number of columns -impl OMatrix +impl OMatrix where DefaultAllocator: Allocator, { @@ -645,19 +657,8 @@ where ncols); } -impl OMatrix -where - DefaultAllocator: Allocator, -{ - /// Creates a new uninitialized matrix or vector. - #[inline] - pub fn new_uninitialized(ncols: usize) -> OMatrix, R, Dynamic> { - Self::new_uninitialized_generic(R::name(), Dynamic::new(ncols)) - } -} - /// # Constructors of dynamic vectors and matrices with a dynamic number of rows -impl OMatrix +impl OMatrix where DefaultAllocator: Allocator, { @@ -667,19 +668,8 @@ where nrows); } -impl OMatrix -where - DefaultAllocator: Allocator, -{ - /// Creates a new uninitialized matrix or vector. - #[inline] - pub fn new_uninitialized(nrows: usize) -> OMatrix, Dynamic, C> { - Self::new_uninitialized_generic(Dynamic::new(nrows), C::name()) - } -} - /// # Constructors of fully dynamic matrices -impl OMatrix +impl OMatrix where DefaultAllocator: Allocator, { @@ -689,20 +679,6 @@ where nrows, ncols); } -impl OMatrix -where - DefaultAllocator: Allocator, -{ - /// Creates a new uninitialized matrix or vector. - #[inline] - pub fn new_uninitialized( - nrows: usize, - ncols: usize, - ) -> OMatrix, Dynamic, Dynamic> { - Self::new_uninitialized_generic(Dynamic::new(nrows), Dynamic::new(ncols)) - } -} - /* * * Constructors that don't necessarily require all dimensions @@ -711,10 +687,8 @@ where */ macro_rules! impl_constructors_from_data( ($data: ident; $($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => { - impl OMatrix - where - DefaultAllocator: Allocator - { + impl OMatrix + where DefaultAllocator: Allocator { /// Creates a matrix with its elements filled with the components provided by a slice /// in row-major order. /// @@ -741,10 +715,7 @@ macro_rules! impl_constructors_from_data( /// dm[(1, 0)] == 3 && dm[(1, 1)] == 4 && dm[(1, 2)] == 5); /// ``` #[inline] - pub fn from_row_slice($($args: usize,)* $data: &[T]) -> Self - where - T: Clone - { + pub fn from_row_slice($($args: usize,)* $data: &[T]) -> Self { Self::from_row_slice_generic($($gargs, )* $data) } @@ -771,10 +742,7 @@ macro_rules! 
impl_constructors_from_data( /// dm[(1, 0)] == 1 && dm[(1, 1)] == 3 && dm[(1, 2)] == 5); /// ``` #[inline] - pub fn from_column_slice($($args: usize,)* $data: &[T]) -> Self - where - T: Clone - { + pub fn from_column_slice($($args: usize,)* $data: &[T]) -> Self { Self::from_column_slice_generic($($gargs, )* $data) } @@ -877,7 +845,7 @@ where } #[cfg(feature = "rand-no-std")] -impl Distribution> for Standard +impl Distribution> for Standard where DefaultAllocator: Allocator, Standard: Distribution, @@ -892,10 +860,13 @@ where } #[cfg(feature = "arbitrary")] -impl Arbitrary for OMatrix +impl Arbitrary for OMatrix where - T: Arbitrary + Send, + R: Dim, + C: Dim, + T: Scalar + Arbitrary + Send, DefaultAllocator: Allocator, + Owned: Clone + Send, { #[inline] fn arbitrary(g: &mut Gen) -> Self { diff --git a/src/base/construction_slice.rs b/src/base/construction_slice.rs index 650fbfd0..7094bdca 100644 --- a/src/base/construction_slice.rs +++ b/src/base/construction_slice.rs @@ -1,11 +1,13 @@ use crate::base::dimension::{Const, Dim, DimName, Dynamic}; use crate::base::matrix_slice::{SliceStorage, SliceStorageMut}; -use crate::base::{MatrixSlice, MatrixSliceMutMN}; +use crate::base::{MatrixSlice, MatrixSliceMutMN, Scalar}; use num_rational::Ratio; /// # Creating matrix slices from `&[T]` -impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> MatrixSlice<'a, T, R, C, RStride, CStride> { +impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> + MatrixSlice<'a, T, R, C, RStride, CStride> +{ /// Creates, without bound-checking, a matrix slice from an array and with dimensions and strides specified by generic types instances. /// /// # Safety @@ -55,7 +57,7 @@ impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> MatrixSlice<'a, T, R, C, } } -impl<'a, T, R: Dim, C: Dim> MatrixSlice<'a, T, R, C> { +impl<'a, T: Scalar, R: Dim, C: Dim> MatrixSlice<'a, T, R, C> { /// Creates, without bound-checking, a matrix slice from an array and with dimensions specified by generic types instances. /// /// # Safety @@ -85,7 +87,7 @@ impl<'a, T, R: Dim, C: Dim> MatrixSlice<'a, T, R, C> { macro_rules! impl_constructors( ($($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => { - impl<'a, T, $($DimIdent: $DimBound),*> MatrixSlice<'a, T, $($Dims),*> { + impl<'a, T: Scalar, $($DimIdent: $DimBound),*> MatrixSlice<'a, T, $($Dims),*> { /// Creates a new matrix slice from the given data array. /// /// Panics if `data` does not contain enough elements. @@ -101,7 +103,7 @@ macro_rules! impl_constructors( } } - impl<'a, T, $($DimIdent: $DimBound, )*> MatrixSlice<'a, T, $($Dims,)* Dynamic, Dynamic> { + impl<'a, T: Scalar, $($DimIdent: $DimBound, )*> MatrixSlice<'a, T, $($Dims,)* Dynamic, Dynamic> { /// Creates a new matrix slice with the specified strides from the given data array. /// /// Panics if `data` does not contain enough elements. @@ -141,7 +143,7 @@ impl_constructors!(Dynamic, Dynamic; nrows, ncols); /// # Creating mutable matrix slices from `&mut [T]` -impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> +impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> MatrixSliceMutMN<'a, T, R, C, RStride, CStride> { /// Creates, without bound-checking, a mutable matrix slice from an array and with dimensions and strides specified by generic types instances. 
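
For readers following along, the slice constructors these hunks re-bound to `T: Scalar` are easiest to understand from the call site. A minimal usage sketch against the public API of this nalgebra generation (the `DMatrixSlice`/`DMatrixSliceMut` aliases and the `from_slice` constructors are assumed to match the release this patch targets):

```rust
use nalgebra::{DMatrixSlice, DMatrixSliceMut};

fn main() {
    // Column-major data: the columns are (1, 2), (3, 4), (5, 6).
    let data = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0];
    let view = DMatrixSlice::from_slice(&data, 2, 3);
    assert_eq!(view[(1, 2)], 6.0);

    // Mutable views write through to the underlying slice.
    let mut data = [0.0; 6];
    let mut view_mut = DMatrixSliceMut::from_slice(&mut data, 2, 3);
    view_mut[(0, 0)] = 42.0;
    assert_eq!(data[0], 42.0);
}
```
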
@@ -215,7 +217,7 @@ impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> } } -impl<'a, T, R: Dim, C: Dim> MatrixSliceMutMN<'a, T, R, C> { +impl<'a, T: Scalar, R: Dim, C: Dim> MatrixSliceMutMN<'a, T, R, C> { /// Creates, without bound-checking, a mutable matrix slice from an array and with dimensions specified by generic types instances. /// /// # Safety @@ -245,7 +247,7 @@ impl<'a, T, R: Dim, C: Dim> MatrixSliceMutMN<'a, T, R, C> { macro_rules! impl_constructors_mut( ($($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => { - impl<'a, T, $($DimIdent: $DimBound),*> MatrixSliceMutMN<'a, T, $($Dims),*> { + impl<'a, T: Scalar, $($DimIdent: $DimBound),*> MatrixSliceMutMN<'a, T, $($Dims),*> { /// Creates a new mutable matrix slice from the given data array. /// /// Panics if `data` does not contain enough elements. @@ -261,7 +263,7 @@ macro_rules! impl_constructors_mut( } } - impl<'a, T, $($DimIdent: $DimBound, )*> MatrixSliceMutMN<'a, T, $($Dims,)* Dynamic, Dynamic> { + impl<'a, T: Scalar, $($DimIdent: $DimBound, )*> MatrixSliceMutMN<'a, T, $($Dims,)* Dynamic, Dynamic> { /// Creates a new mutable matrix slice with the specified strides from the given data array. /// /// Panics if `data` does not contain enough elements. diff --git a/src/base/conversion.rs b/src/base/conversion.rs index b8a50048..ec7fd936 100644 --- a/src/base/conversion.rs +++ b/src/base/conversion.rs @@ -1,10 +1,8 @@ -use std::borrow::{Borrow, BorrowMut}; -use std::convert::{AsMut, AsRef, From, Into}; -use std::mem::{self, ManuallyDrop, MaybeUninit}; - #[cfg(all(feature = "alloc", not(feature = "std")))] use alloc::vec::Vec; use simba::scalar::{SubsetOf, SupersetOf}; +use std::borrow::{Borrow, BorrowMut}; +use std::convert::{AsMut, AsRef, From, Into}; use simba::simd::{PrimitiveSimdValue, SimdValue}; @@ -16,7 +14,7 @@ use crate::base::dimension::{ Const, Dim, DimName, U1, U10, U11, U12, U13, U14, U15, U16, U2, U3, U4, U5, U6, U7, U8, U9, }; use crate::base::iter::{MatrixIter, MatrixIterMut}; -use crate::base::storage::{ContiguousStorage, ContiguousStorageMut, Storage, StorageMut}; +use crate::base::storage::{IsContiguous, RawStorage, RawStorageMut}; use crate::base::{ ArrayStorage, DVectorSlice, DVectorSliceMut, DefaultAllocator, Matrix, MatrixSlice, MatrixSliceMut, OMatrix, Scalar, @@ -26,12 +24,17 @@ use crate::base::{DVector, VecStorage}; use crate::base::{SliceStorage, SliceStorageMut}; use crate::constraint::DimEq; use crate::{IsNotStaticOne, RowSVector, SMatrix, SVector}; +use std::mem::MaybeUninit; // TODO: too bad this won't work for slice conversions. -impl SubsetOf> - for OMatrix +impl SubsetOf> for OMatrix where - T2: SupersetOf, + R1: Dim, + C1: Dim, + R2: Dim, + C2: Dim, + T1: Scalar, + T2: Scalar + SupersetOf, DefaultAllocator: Allocator + Allocator + SameShapeAllocator, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, @@ -41,11 +44,11 @@ where let (nrows, ncols) = self.shape(); let nrows2 = R2::from_usize(nrows); let ncols2 = C2::from_usize(ncols); - - let mut res = Matrix::new_uninitialized_generic(nrows2, ncols2); + let mut res = Matrix::uninit(nrows2, ncols2); for i in 0..nrows { for j in 0..ncols { + // Safety: all indices are in range. unsafe { *res.get_unchecked_mut((i, j)) = MaybeUninit::new(T2::from_subset(self.get_unchecked((i, j)))); @@ -53,7 +56,7 @@ where } } - // Safety: all entries have been initialized. + // Safety: res is now fully initialized. 
unsafe { res.assume_init() } } @@ -67,23 +70,25 @@ where let (nrows2, ncols2) = m.shape(); let nrows = R1::from_usize(nrows2); let ncols = C1::from_usize(ncols2); + let mut res = Matrix::uninit(nrows, ncols); - let mut res = OMatrix::new_uninitialized_generic(nrows, ncols); for i in 0..nrows2 { for j in 0..ncols2 { + // Safety: all indices are in range. unsafe { *res.get_unchecked_mut((i, j)) = - MaybeUninit::new(m.get_unchecked((i, j)).to_subset_unchecked()); + MaybeUninit::new(m.get_unchecked((i, j)).to_subset_unchecked()) } } } - // Safety: all entries have been initialized. unsafe { res.assume_init() } } } -impl<'a, T, R: Dim, C: Dim, S: Storage> IntoIterator for &'a Matrix { +impl<'a, T: Scalar, R: Dim, C: Dim, S: RawStorage> IntoIterator + for &'a Matrix +{ type Item = &'a T; type IntoIter = MatrixIter<'a, T, R, C, S>; @@ -93,7 +98,9 @@ impl<'a, T, R: Dim, C: Dim, S: Storage> IntoIterator for &'a Matrix> IntoIterator for &'a mut Matrix { +impl<'a, T: Scalar, R: Dim, C: Dim, S: RawStorageMut> IntoIterator + for &'a mut Matrix +{ type Item = &'a mut T; type IntoIter = MatrixIterMut<'a, T, R, C, S>; @@ -103,35 +110,32 @@ impl<'a, T, R: Dim, C: Dim, S: StorageMut> IntoIterator for &'a mut Mat } } -impl From<[T; D]> for SVector { +impl From<[T; D]> for SVector { #[inline] fn from(arr: [T; D]) -> Self { - Self::from_data(ArrayStorage([arr; 1])) + unsafe { Self::from_data_statically_unchecked(ArrayStorage([arr; 1])) } } } -impl From> for [T; D] { +impl From> for [T; D] { #[inline] fn from(vec: SVector) -> Self { - let data = ManuallyDrop::new(vec.data.0); - // Safety: [[T; D]; 1] always has the same data layout as [T; D]. - let res = unsafe { (data.as_ptr() as *const [_; D]).read() }; - mem::forget(data); - res + // TODO: unfortunately, we must clone because we can move out of an array. + vec.data.0[0].clone() } } -impl From<[T; D]> for RowSVector +impl From<[T; D]> for RowSVector where Const: IsNotStaticOne, { #[inline] fn from(arr: [T; D]) -> Self { - SVector::::from(arr).transpose_into() + SVector::::from(arr).transpose() } } -impl From> for [T; D] +impl From> for [T; D] where Const: IsNotStaticOne, { @@ -144,10 +148,11 @@ where macro_rules! impl_from_into_asref_1D( ($(($NRows: ident, $NCols: ident) => $SZ: expr);* $(;)*) => {$( impl AsRef<[T; $SZ]> for Matrix - where - S: ContiguousStorage { + where T: Scalar, + S: RawStorage + IsContiguous { #[inline] fn as_ref(&self) -> &[T; $SZ] { + // Safety: this is OK thanks to the IsContiguous trait. unsafe { &*(self.data.ptr() as *const [T; $SZ]) } @@ -155,10 +160,11 @@ macro_rules! impl_from_into_asref_1D( } impl AsMut<[T; $SZ]> for Matrix - where - S: ContiguousStorageMut { + where T: Scalar, + S: RawStorageMut + IsContiguous { #[inline] fn as_mut(&mut self) -> &mut [T; $SZ] { + // Safety: this is OK thanks to the IsContiguous trait. unsafe { &mut *(self.data.ptr_mut() as *mut [T; $SZ]) } @@ -182,14 +188,14 @@ impl_from_into_asref_1D!( (U13, U1) => 13; (U14, U1) => 14; (U15, U1) => 15; (U16, U1) => 16; ); -impl From<[[T; R]; C]> for SMatrix { +impl From<[[T; R]; C]> for SMatrix { #[inline] fn from(arr: [[T; R]; C]) -> Self { - Self::from_data(ArrayStorage(arr)) + unsafe { Self::from_data_statically_unchecked(ArrayStorage(arr)) } } } -impl From> for [[T; R]; C] { +impl From> for [[T; R]; C] { #[inline] fn from(vec: SMatrix) -> Self { vec.data.0 @@ -203,20 +209,22 @@ macro_rules! 
impl_from_into_asref_borrow_2D( ($NRows: ty, $NCols: ty) => ($SZRows: expr, $SZCols: expr); $Ref:ident.$ref:ident(), $Mut:ident.$mut:ident() ) => { - impl $Ref<[[T; $SZRows]; $SZCols]> for Matrix - where S: ContiguousStorage { + impl $Ref<[[T; $SZRows]; $SZCols]> for Matrix + where S: RawStorage + IsContiguous { #[inline] fn $ref(&self) -> &[[T; $SZRows]; $SZCols] { + // Safety: OK thanks to the IsContiguous trait. unsafe { &*(self.data.ptr() as *const [[T; $SZRows]; $SZCols]) } } } - impl $Mut<[[T; $SZRows]; $SZCols]> for Matrix - where S: ContiguousStorageMut { + impl $Mut<[[T; $SZRows]; $SZCols]> for Matrix + where S: RawStorageMut + IsContiguous { #[inline] fn $mut(&mut self) -> &mut [[T; $SZRows]; $SZCols] { + // Safety: OK thanks to the IsContiguous trait. unsafe { &mut *(self.data.ptr_mut() as *mut [[T; $SZRows]; $SZCols]) } @@ -244,9 +252,13 @@ impl_from_into_asref_borrow_2D!( (U6, U2) => (6, 2); (U6, U3) => (6, 3); (U6, U4) => (6, 4); (U6, U5) => (6, 5); (U6, U6) => (6, 6); ); -impl<'a, T: Clone, RStride: Dim, CStride: Dim, const R: usize, const C: usize> +impl<'a, T, RStride, CStride, const R: usize, const C: usize> From, Const, RStride, CStride>> for Matrix, Const, ArrayStorage> +where + T: Scalar, + RStride: Dim, + CStride: Dim, { fn from(matrix_slice: MatrixSlice<'a, T, Const, Const, RStride, CStride>) -> Self { matrix_slice.into_owned() @@ -254,9 +266,13 @@ impl<'a, T: Clone, RStride: Dim, CStride: Dim, const R: usize, const C: usize> } #[cfg(any(feature = "std", feature = "alloc"))] -impl<'a, T: Clone, C: Dim, RStride: Dim, CStride: Dim> - From> +impl<'a, T, C, RStride, CStride> From> for Matrix> +where + T: Scalar, + C: Dim, + RStride: Dim, + CStride: Dim, { fn from(matrix_slice: MatrixSlice<'a, T, Dynamic, C, RStride, CStride>) -> Self { matrix_slice.into_owned() @@ -264,18 +280,26 @@ impl<'a, T: Clone, C: Dim, RStride: Dim, CStride: Dim> } #[cfg(any(feature = "std", feature = "alloc"))] -impl<'a, T: Clone, R: DimName, RStride: Dim, CStride: Dim> - From> +impl<'a, T, R, RStride, CStride> From> for Matrix> +where + T: Scalar, + R: DimName, + RStride: Dim, + CStride: Dim, { fn from(matrix_slice: MatrixSlice<'a, T, R, Dynamic, RStride, CStride>) -> Self { matrix_slice.into_owned() } } -impl<'a, T: Clone, RStride: Dim, CStride: Dim, const R: usize, const C: usize> +impl<'a, T, RStride, CStride, const R: usize, const C: usize> From, Const, RStride, CStride>> for Matrix, Const, ArrayStorage> +where + T: Scalar, + RStride: Dim, + CStride: Dim, { fn from(matrix_slice: MatrixSliceMut<'a, T, Const, Const, RStride, CStride>) -> Self { matrix_slice.into_owned() @@ -283,9 +307,13 @@ impl<'a, T: Clone, RStride: Dim, CStride: Dim, const R: usize, const C: usize> } #[cfg(any(feature = "std", feature = "alloc"))] -impl<'a, T: Clone, C: Dim, RStride: Dim, CStride: Dim> - From> +impl<'a, T, C, RStride, CStride> From> for Matrix> +where + T: Scalar, + C: Dim, + RStride: Dim, + CStride: Dim, { fn from(matrix_slice: MatrixSliceMut<'a, T, Dynamic, C, RStride, CStride>) -> Self { matrix_slice.into_owned() @@ -293,26 +321,37 @@ impl<'a, T: Clone, C: Dim, RStride: Dim, CStride: Dim> } #[cfg(any(feature = "std", feature = "alloc"))] -impl<'a, T: Clone, R: DimName, RStride: Dim, CStride: Dim> - From> +impl<'a, T, R, RStride, CStride> From> for Matrix> +where + T: Scalar, + R: DimName, + RStride: Dim, + CStride: Dim, { fn from(matrix_slice: MatrixSliceMut<'a, T, R, Dynamic, RStride, CStride>) -> Self { matrix_slice.into_owned() } } -impl<'a, T, R: Dim, C: Dim, RSlice: Dim, CSlice: Dim, RStride: Dim, 
CStride: Dim, S> - From<&'a Matrix> for MatrixSlice<'a, T, RSlice, CSlice, RStride, CStride> +impl<'a, T, R, C, RSlice, CSlice, RStride, CStride, S> From<&'a Matrix> + for MatrixSlice<'a, T, RSlice, CSlice, RStride, CStride> where - S: Storage, + T: Scalar, + R: Dim, + C: Dim, + RSlice: Dim, + CSlice: Dim, + RStride: Dim, + CStride: Dim, + S: RawStorage, ShapeConstraint: DimEq + DimEq + DimEq + DimEq, { fn from(m: &'a Matrix) -> Self { - let (row, col) = m.data.shape(); + let (row, col) = m.shape_generic(); let row_slice = RSlice::from_usize(row.value()); let col_slice = CSlice::from_usize(col.value()); @@ -327,23 +366,29 @@ where (row_slice, col_slice), (rstride_slice, cstride_slice), ); - - Self::from_data(data) + Matrix::from_data_statically_unchecked(data) } } } -impl<'a, T, R: Dim, C: Dim, RSlice: Dim, CSlice: Dim, RStride: Dim, CStride: Dim, S> - From<&'a mut Matrix> for MatrixSlice<'a, T, RSlice, CSlice, RStride, CStride> +impl<'a, T, R, C, RSlice, CSlice, RStride, CStride, S> From<&'a mut Matrix> + for MatrixSlice<'a, T, RSlice, CSlice, RStride, CStride> where - S: Storage, + T: Scalar, + R: Dim, + C: Dim, + RSlice: Dim, + CSlice: Dim, + RStride: Dim, + CStride: Dim, + S: RawStorage, ShapeConstraint: DimEq + DimEq + DimEq + DimEq, { fn from(m: &'a mut Matrix) -> Self { - let (row, col) = m.data.shape(); + let (row, col) = m.shape_generic(); let row_slice = RSlice::from_usize(row.value()); let col_slice = CSlice::from_usize(col.value()); @@ -358,23 +403,29 @@ where (row_slice, col_slice), (rstride_slice, cstride_slice), ); - - Matrix::from_data(data) + Matrix::from_data_statically_unchecked(data) } } } -impl<'a, T, R: Dim, C: Dim, RSlice: Dim, CSlice: Dim, RStride: Dim, CStride: Dim, S> - From<&'a mut Matrix> for MatrixSliceMut<'a, T, RSlice, CSlice, RStride, CStride> +impl<'a, T, R, C, RSlice, CSlice, RStride, CStride, S> From<&'a mut Matrix> + for MatrixSliceMut<'a, T, RSlice, CSlice, RStride, CStride> where - S: StorageMut, + T: Scalar, + R: Dim, + C: Dim, + RSlice: Dim, + CSlice: Dim, + RStride: Dim, + CStride: Dim, + S: RawStorageMut, ShapeConstraint: DimEq + DimEq + DimEq + DimEq, { fn from(m: &'a mut Matrix) -> Self { - let (row, col) = m.data.shape(); + let (row, col) = m.shape_generic(); let row_slice = RSlice::from_usize(row.value()); let col_slice = CSlice::from_usize(col.value()); @@ -389,22 +440,21 @@ where (row_slice, col_slice), (rstride_slice, cstride_slice), ); - - Matrix::from_data(data) + Matrix::from_data_statically_unchecked(data) } } } #[cfg(any(feature = "std", feature = "alloc"))] -impl<'a, T> From> for DVector { +impl<'a, T: Scalar> From> for DVector { #[inline] fn from(vec: Vec) -> Self { Self::from_vec(vec) } } -impl<'a, T, R: Dim, C: Dim, S: ContiguousStorage> From<&'a Matrix> - for &'a [T] +impl<'a, T: Scalar + Copy, R: Dim, C: Dim, S: RawStorage + IsContiguous> + From<&'a Matrix> for &'a [T] { #[inline] fn from(matrix: &'a Matrix) -> Self { @@ -412,8 +462,8 @@ impl<'a, T, R: Dim, C: Dim, S: ContiguousStorage> From<&'a Matrix> From<&'a mut Matrix> - for &'a mut [T] +impl<'a, T: Scalar + Copy, R: Dim, C: Dim, S: RawStorageMut + IsContiguous> + From<&'a mut Matrix> for &'a mut [T] { #[inline] fn from(matrix: &'a mut Matrix) -> Self { @@ -421,27 +471,27 @@ impl<'a, T, R: Dim, C: Dim, S: ContiguousStorageMut> From<&'a mut Matri } } -impl<'a, T> From<&'a [T]> for DVectorSlice<'a, T> { +impl<'a, T: Scalar + Copy> From<&'a [T]> for DVectorSlice<'a, T> { #[inline] fn from(slice: &'a [T]) -> Self { Self::from_slice(slice, slice.len()) } } -impl<'a, T> From> for 
&'a [T] { +impl<'a, T: Scalar> From> for &'a [T] { fn from(vec: DVectorSlice<'a, T>) -> &'a [T] { vec.data.into_slice() } } -impl<'a, T> From<&'a mut [T]> for DVectorSliceMut<'a, T> { +impl<'a, T: Scalar + Copy> From<&'a mut [T]> for DVectorSliceMut<'a, T> { #[inline] fn from(slice: &'a mut [T]) -> Self { Self::from_slice(slice, slice.len()) } } -impl<'a, T> From> for &'a mut [T] { +impl<'a, T: Scalar> From> for &'a mut [T] { fn from(vec: DVectorSliceMut<'a, T>) -> &'a mut [T] { vec.data.into_slice_mut() } @@ -456,7 +506,7 @@ where { #[inline] fn from(arr: [OMatrix; 2]) -> Self { - let (nrows, ncols) = arr[0].data.shape(); + let (nrows, ncols) = arr[0].shape_generic(); Self::from_fn_generic(nrows, ncols, |i, j| { [ @@ -477,7 +527,7 @@ where { #[inline] fn from(arr: [OMatrix; 4]) -> Self { - let (nrows, ncols) = arr[0].data.shape(); + let (nrows, ncols) = arr[0].shape_generic(); Self::from_fn_generic(nrows, ncols, |i, j| { [ @@ -500,7 +550,7 @@ where { #[inline] fn from(arr: [OMatrix; 8]) -> Self { - let (nrows, ncols) = arr[0].data.shape(); + let (nrows, ncols) = arr[0].shape_generic(); Self::from_fn_generic(nrows, ncols, |i, j| { [ @@ -526,7 +576,7 @@ where DefaultAllocator: Allocator + Allocator, { fn from(arr: [OMatrix; 16]) -> Self { - let (nrows, ncols) = arr[0].data.shape(); + let (nrows, ncols) = arr[0].shape_generic(); Self::from_fn_generic(nrows, ncols, |i, j| { [ diff --git a/src/base/coordinates.rs b/src/base/coordinates.rs index 6389ccbe..db66811d 100644 --- a/src/base/coordinates.rs +++ b/src/base/coordinates.rs @@ -7,8 +7,8 @@ use std::ops::{Deref, DerefMut}; use crate::base::dimension::{U1, U2, U3, U4, U5, U6}; -use crate::base::storage::{ContiguousStorage, ContiguousStorageMut}; -use crate::base::Matrix; +use crate::base::storage::{IsContiguous, RawStorage, RawStorageMut}; +use crate::base::{Matrix, Scalar}; /* * @@ -23,7 +23,7 @@ macro_rules! coords_impl( #[repr(C)] #[derive(Eq, PartialEq, Clone, Hash, Debug, Copy)] #[cfg_attr(feature = "serde-serialize-no-std", derive(Serialize, Deserialize))] - pub struct $T { + pub struct $T { $(pub $comps: T),* } } @@ -31,20 +31,22 @@ macro_rules! coords_impl( macro_rules! deref_impl( ($R: ty, $C: ty; $Target: ident) => { - impl Deref for Matrix - where S: ContiguousStorage { + impl Deref for Matrix + where S: RawStorage + IsContiguous { type Target = $Target; #[inline] fn deref(&self) -> &Self::Target { + // Safety: this is OK because of the IsContiguous trait. unsafe { &*(self.data.ptr() as *const Self::Target) } } } - impl DerefMut for Matrix - where S: ContiguousStorageMut { + impl DerefMut for Matrix + where S: RawStorageMut + IsContiguous { #[inline] fn deref_mut(&mut self) -> &mut Self::Target { + // Safety: this is OK because of the IsContiguous trait. unsafe { &mut *(self.data.ptr_mut() as *mut Self::Target) } } } diff --git a/src/base/default_allocator.rs b/src/base/default_allocator.rs index 9face98c..2f996008 100644 --- a/src/base/default_allocator.rs +++ b/src/base/default_allocator.rs @@ -4,72 +4,50 @@ //! heap-allocated buffers for matrices with at least one dimension unknown at compile-time. 
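
Much of the rewrite below centers on handing out `MaybeUninit` buffers from `allocate_uninit` and asserting initialization afterwards. A self-contained, std-only sketch of the underlying pattern for the heap-allocated case (an illustration of the technique, not the allocator's actual code):

```rust
use std::mem::{ManuallyDrop, MaybeUninit};

/// Builds a fully initialized `Vec<f64>` by first allocating uninitialized
/// storage and then writing every element exactly once.
fn filled(len: usize, f: impl Fn(usize) -> f64) -> Vec<f64> {
    let mut buf: Vec<MaybeUninit<f64>> = Vec::with_capacity(len);
    // SAFETY: `MaybeUninit<f64>` may hold uninitialized bytes, so setting
    // the length before writing is sound for this element type.
    unsafe { buf.set_len(len) };
    for (i, slot) in buf.iter_mut().enumerate() {
        slot.write(f(i));
    }
    // Disarm the original vector, then rebuild it with the initialized type.
    let mut buf = ManuallyDrop::new(buf);
    // SAFETY: every element was initialized above, and `MaybeUninit<f64>`
    // has the same layout and alignment as `f64`.
    unsafe { Vec::from_raw_parts(buf.as_mut_ptr() as *mut f64, buf.len(), buf.capacity()) }
}

fn main() {
    assert_eq!(filled(3, |i| i as f64 + 0.5), vec![0.5, 1.5, 2.5]);
}
```
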
use std::cmp; -use std::fmt; -use std::mem::{self, ManuallyDrop, MaybeUninit}; +use std::mem; use std::ptr; #[cfg(all(feature = "alloc", not(feature = "std")))] use alloc::vec::Vec; +use super::Const; +use crate::base::allocator::{Allocator, Reallocator}; +use crate::base::array_storage::ArrayStorage; #[cfg(any(feature = "alloc", feature = "std"))] use crate::base::dimension::Dynamic; - -use super::Const; -use crate::base::allocator::{Allocator, InnerAllocator, Reallocator}; -use crate::base::array_storage::ArrayStorage; use crate::base::dimension::{Dim, DimName}; -use crate::base::storage::{ - ContiguousStorage, ContiguousStorageMut, InnerOwned, Storage, StorageMut, -}; +use crate::base::storage::{RawStorage, RawStorageMut}; +#[cfg(any(feature = "std", feature = "alloc"))] use crate::base::vec_storage::VecStorage; -use crate::U1; +use crate::base::Scalar; +use std::mem::{ManuallyDrop, MaybeUninit}; /* * * Allocator. * */ -/// A helper struct that controls how the storage for a matrix should be allocated. -/// -/// This struct is useless on its own. Instead, it's used in trait /// An allocator based on `GenericArray` and `VecStorage` for statically-sized and dynamically-sized /// matrices respectively. #[derive(Copy, Clone, Debug)] pub struct DefaultAllocator; // Static - Static -impl InnerAllocator, Const> for DefaultAllocator { +impl Allocator, Const> + for DefaultAllocator +{ type Buffer = ArrayStorage; + type BufferUninit = ArrayStorage, R, C>; #[inline] - fn allocate_from_iterator>( - nrows: Const, - ncols: Const, - iter: I, - ) -> Self::Buffer { - let mut res = Self::allocate_uninitialized(nrows, ncols); - let mut count = 0; - - for (res, e) in res.as_mut_slice().iter_mut().zip(iter.into_iter()) { - *res = MaybeUninit::new(e); - count += 1; - } - - assert!( - count == nrows.value() * ncols.value(), - "Matrix init. from iterator: iterator not long enough." - ); - - // Safety: we have initialized all entries. - unsafe { , Const>>::assume_init(res) } + unsafe fn allocate_uninitialized(_: Const, _: Const) -> MaybeUninit { + mem::MaybeUninit::::uninit() } -} -impl Allocator, Const> for DefaultAllocator { #[inline] - fn allocate_uninitialized(_: Const, _: Const) -> ArrayStorage, R, C> { + fn allocate_uninit(_: Const, _: Const) -> ArrayStorage, R, C> { // SAFETY: An uninitialized `[MaybeUninit<_>; _]` is valid. - let array = unsafe { MaybeUninit::uninit().assume_init() }; + let array: [[MaybeUninit; R]; C] = unsafe { MaybeUninit::uninit().assume_init() }; ArrayStorage(array) } @@ -83,41 +61,53 @@ impl Allocator, Const> for Def ArrayStorage((&uninit as *const _ as *const [_; C]).read()) } - /// Specifies that a given buffer's entries should be manually dropped. #[inline] - fn manually_drop(buf: ArrayStorage) -> ArrayStorage, R, C> { - // SAFETY: - // * `ManuallyDrop` and T are guaranteed to have the same layout - // * `ManuallyDrop` does not drop, so there are no double-frees - // And thus the conversion is safe - unsafe { ArrayStorage((&ManuallyDrop::new(buf) as *const _ as *const [_; C]).read()) } + fn allocate_from_iterator>( + nrows: Const, + ncols: Const, + iter: I, + ) -> Self::Buffer { + #[cfg(feature = "no_unsound_assume_init")] + let mut res: Self::Buffer = unimplemented!(); + #[cfg(not(feature = "no_unsound_assume_init"))] + let mut res = unsafe { Self::allocate_uninitialized(nrows, ncols).assume_init() }; + let mut count = 0; + + // Safety: this is OK because the Buffer is known to be contiguous. 
+ let res_slice = unsafe { res.as_mut_slice_unchecked() }; + for (res, e) in res_slice.iter_mut().zip(iter.into_iter()) { + *res = e; + count += 1; + } + + assert!( + count == nrows.value() * ncols.value(), + "Matrix init. from iterator: iterator not long enough." + ); + + res } } // Dynamic - Static // Dynamic - Dynamic #[cfg(any(feature = "std", feature = "alloc"))] -impl InnerAllocator for DefaultAllocator { +impl Allocator for DefaultAllocator { type Buffer = VecStorage; + type BufferUninit = VecStorage, Dynamic, C>; #[inline] - fn allocate_from_iterator>( - nrows: Dynamic, - ncols: C, - iter: I, - ) -> Self::Buffer { - let it = iter.into_iter(); - let res: Vec = it.collect(); - assert!(res.len() == nrows.value() * ncols.value(), - "Allocation from iterator error: the iterator did not yield the correct number of elements."); + unsafe fn allocate_uninitialized(nrows: Dynamic, ncols: C) -> MaybeUninit { + let mut res = Vec::new(); + let length = nrows.value() * ncols.value(); + res.reserve_exact(length); + res.set_len(length); - VecStorage::new(nrows, ncols, res) + mem::MaybeUninit::new(VecStorage::new(nrows, ncols, res)) } -} -impl Allocator for DefaultAllocator { #[inline] - fn allocate_uninitialized(nrows: Dynamic, ncols: C) -> VecStorage, Dynamic, C> { + fn allocate_uninit(nrows: Dynamic, ncols: C) -> VecStorage, Dynamic, C> { let mut data = Vec::new(); let length = nrows.value() * ncols.value(); data.reserve_exact(length); @@ -143,32 +133,10 @@ impl Allocator for DefaultAllocator { VecStorage::new(nrows, ncols, new_data) } - #[inline] - fn manually_drop(buf: VecStorage) -> VecStorage, Dynamic, C> { - // Avoids a double-drop. - let (nrows, ncols) = buf.shape(); - let vec: Vec<_> = buf.into(); - let mut md = ManuallyDrop::new(vec); - - // Safety: - // - ManuallyDrop has the same alignment and layout as T. - // - The length and capacity come from a valid vector. - let new_data = - unsafe { Vec::from_raw_parts(md.as_mut_ptr() as *mut _, md.len(), md.capacity()) }; - - VecStorage::new(nrows, ncols, new_data) - } -} - -// Static - Dynamic -#[cfg(any(feature = "std", feature = "alloc"))] -impl InnerAllocator for DefaultAllocator { - type Buffer = VecStorage; - #[inline] fn allocate_from_iterator>( - nrows: R, - ncols: Dynamic, + nrows: Dynamic, + ncols: C, iter: I, ) -> Self::Buffer { let it = iter.into_iter(); @@ -180,9 +148,24 @@ impl InnerAllocator for DefaultAllocator { } } -impl Allocator for DefaultAllocator { +// Static - Dynamic +#[cfg(any(feature = "std", feature = "alloc"))] +impl Allocator for DefaultAllocator { + type Buffer = VecStorage; + type BufferUninit = VecStorage, R, Dynamic>; + #[inline] - fn allocate_uninitialized(nrows: R, ncols: Dynamic) -> VecStorage, R, Dynamic> { + unsafe fn allocate_uninitialized(nrows: R, ncols: Dynamic) -> MaybeUninit { + let mut res = Vec::new(); + let length = nrows.value() * ncols.value(); + res.reserve_exact(length); + res.set_len(length); + + mem::MaybeUninit::new(VecStorage::new(nrows, ncols, res)) + } + + #[inline] + fn allocate_uninit(nrows: R, ncols: Dynamic) -> VecStorage, R, Dynamic> { let mut data = Vec::new(); let length = nrows.value() * ncols.value(); data.reserve_exact(length); @@ -209,253 +192,59 @@ impl Allocator for DefaultAllocator { } #[inline] - fn manually_drop(buf: VecStorage) -> VecStorage, R, Dynamic> { - // Avoids a double-drop. 
- let (nrows, ncols) = buf.shape(); - let vec: Vec<_> = buf.into(); - let mut md = ManuallyDrop::new(vec); + fn allocate_from_iterator>( + nrows: R, + ncols: Dynamic, + iter: I, + ) -> Self::Buffer { + let it = iter.into_iter(); + let res: Vec = it.collect(); + assert!(res.len() == nrows.value() * ncols.value(), + "Allocation from iterator error: the iterator did not yield the correct number of elements."); - // Safety: - // - ManuallyDrop has the same alignment and layout as T. - // - The length and capacity come from a valid vector. - let new_data = - unsafe { Vec::from_raw_parts(md.as_mut_ptr() as *mut _, md.len(), md.capacity()) }; - - VecStorage::new(nrows, ncols, new_data) + VecStorage::new(nrows, ncols, res) } } -/// The owned storage type for a matrix. -#[repr(transparent)] -pub struct Owned(pub InnerOwned) -where - DefaultAllocator: InnerAllocator; - -impl Copy for Owned -where - DefaultAllocator: InnerAllocator, - InnerOwned: Copy, -{ -} - -impl Clone for Owned -where - DefaultAllocator: InnerAllocator, -{ - fn clone(&self) -> Self { - if Self::is_array() { - // We first clone the data. - let slice = unsafe { self.as_slice_unchecked() }; - let vec = ManuallyDrop::new(slice.to_owned()); - - // We then transmute it back into an array and then an Owned. - unsafe { mem::transmute_copy(&*vec.as_ptr()) } - } else { - // We first clone the data. - let clone = ManuallyDrop::new(self.as_vec_storage().clone()); - - // We then transmute it back into an Owned. - unsafe { mem::transmute_copy(&clone) } - } - - // TODO: check that the auxiliary copies are elided. - } -} - -impl fmt::Debug for Owned -where - DefaultAllocator: InnerAllocator, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - if Self::is_array() { - let slice = unsafe { self.as_slice_unchecked() }; - slice.fmt(f) - } else { - self.as_vec_storage().fmt(f) - } - } -} - -impl Owned, Const> { - fn new(array: [[T; R]; C]) -> Self { - Self(ArrayStorage(array)) - } -} - -impl Owned -where - DefaultAllocator: InnerAllocator, -{ - /// Returns whether `Self` stores an [`ArrayStorage`]. This is a zero-cost - /// operation. - const fn is_array() -> bool { - R::is_static() && C::is_static() - } - - /// Returns whether `Self` stores a [`VecStorage`]. - const fn is_vec() -> bool { - !Self::is_array() - } - - /// Returns a reference to the underlying [`VecStorage`]. - /// - /// # Panics - /// This method will panic if `Self` does not contain a [`VecStorage`]. - fn as_vec_storage(&self) -> &VecStorage { - assert!(Self::is_vec()); - - // Safety: `self` is transparent and must contain a `VecStorage`. - unsafe { &*(self as *const _ as *const _) } - } - - /// Returns a mutable reference to the underlying [`VecStorage`]. - /// - /// # Panics - /// This method will panic if `Self` does not contain a [`VecStorage`]. - fn as_vec_storage_mut(&mut self) -> &mut VecStorage { - assert!(Self::is_vec()); - - // Safety: `self` is transparent and must contain a `VecStorage`. 
- unsafe { &mut *(self as *mut _ as *mut _) } - } -} - -unsafe impl Storage for Owned -where - DefaultAllocator: InnerAllocator, -{ - type RStride = U1; - - type CStride = R; - - fn ptr(&self) -> *const T { - if Self::is_array() { - &self as *const _ as *const T - } else { - self.as_vec_storage().as_vec().as_ptr() - } - } - - fn shape(&self) -> (R, C) { - if Self::is_array() { - (R::default(), C::default()) - } else { - let vec = self.as_vec_storage(); - (vec.nrows, vec.ncols) - } - } - - fn strides(&self) -> (Self::RStride, Self::CStride) { - if Self::is_array() { - (U1::name(), R::default()) - } else { - let vec = self.as_vec_storage(); - (U1::name(), vec.nrows) - } - } - - #[inline(always)] - fn is_contiguous(&self) -> bool { - true - } - - unsafe fn as_slice_unchecked(&self) -> &[T] { - if Self::is_array() { - std::slice::from_raw_parts( - self.ptr(), - R::try_to_usize().unwrap() * C::try_to_usize().unwrap(), - ) - } else { - self.as_vec_storage().as_vec().as_ref() - } - } - - #[inline(always)] - fn into_owned(self) -> Self { - self - } - - #[inline(always)] - fn clone_owned(&self) -> Self - where - T: Clone, - { - self.clone() - } -} - -unsafe impl StorageMut for Owned -where - DefaultAllocator: InnerAllocator, -{ - fn ptr_mut(&mut self) -> *mut T { - if Self::is_array() { - &mut self as *mut _ as *mut T - } else { - self.as_vec_storage_mut().as_vec().as_ptr() - } - } - - unsafe fn as_mut_slice_unchecked(&mut self) -> &mut [T] { - if Self::is_array() { - std::slice::from_raw_parts( - self.ptr_mut(), - R::try_to_usize().unwrap() * C::try_to_usize().unwrap(), - ) - } else { - self.as_vec_storage_mut().as_vec_mut().as_mut() - } - } -} - -unsafe impl ContiguousStorage for Owned where - DefaultAllocator: InnerAllocator -{ -} - -unsafe impl ContiguousStorageMut for Owned where - DefaultAllocator: InnerAllocator -{ -} - /* * * Reallocator. 
* */ // Anything -> Static × Static -impl +impl Reallocator, Const> for DefaultAllocator where + RFrom: Dim, + CFrom: Dim, Self: Allocator, { #[inline] unsafe fn reallocate_copy( rto: Const, cto: Const, - buf: InnerOwned, + buf: >::Buffer, ) -> ArrayStorage { + #[cfg(feature = "no_unsound_assume_init")] + let mut res: ArrayStorage = unimplemented!(); + #[cfg(not(feature = "no_unsound_assume_init"))] let mut res = - , Const>>::allocate_uninitialized(rto, cto); + , Const>>::allocate_uninitialized(rto, cto) + .assume_init(); let (rfrom, cfrom) = buf.shape(); let len_from = rfrom.value() * cfrom.value(); let len_to = rto.value() * cto.value(); - ptr::copy_nonoverlapping( - buf.ptr(), - res.ptr_mut() as *mut T, - cmp::min(len_from, len_to), - ); + ptr::copy_nonoverlapping(buf.ptr(), res.ptr_mut(), cmp::min(len_from, len_to)); - // Safety: TODO - , Const>>::assume_init(res) + res } } // Static × Static -> Dynamic × Any #[cfg(any(feature = "std", feature = "alloc"))] -impl +impl Reallocator, Const, Dynamic, CTo> for DefaultAllocator where CTo: Dim, @@ -466,25 +255,25 @@ where cto: CTo, buf: ArrayStorage, ) -> VecStorage { - let mut res = >::allocate_uninitialized(rto, cto); + #[cfg(feature = "no_unsound_assume_init")] + let mut res: VecStorage = unimplemented!(); + #[cfg(not(feature = "no_unsound_assume_init"))] + let mut res = + >::allocate_uninitialized(rto, cto).assume_init(); let (rfrom, cfrom) = buf.shape(); let len_from = rfrom.value() * cfrom.value(); let len_to = rto.value() * cto.value(); - ptr::copy_nonoverlapping( - buf.ptr(), - res.ptr_mut() as *mut T, - cmp::min(len_from, len_to), - ); + ptr::copy_nonoverlapping(buf.ptr(), res.ptr_mut(), cmp::min(len_from, len_to)); - >::assume_init(res) + res } } // Static × Static -> Static × Dynamic #[cfg(any(feature = "std", feature = "alloc"))] -impl +impl Reallocator, Const, RTo, Dynamic> for DefaultAllocator where RTo: DimName, @@ -495,25 +284,27 @@ where cto: Dynamic, buf: ArrayStorage, ) -> VecStorage { - let mut res = >::allocate_uninitialized(rto, cto); + #[cfg(feature = "no_unsound_assume_init")] + let mut res: VecStorage = unimplemented!(); + #[cfg(not(feature = "no_unsound_assume_init"))] + let mut res = + >::allocate_uninitialized(rto, cto).assume_init(); let (rfrom, cfrom) = buf.shape(); let len_from = rfrom.value() * cfrom.value(); let len_to = rto.value() * cto.value(); - ptr::copy_nonoverlapping( - buf.ptr(), - res.ptr_mut() as *mut T, - cmp::min(len_from, len_to), - ); + ptr::copy_nonoverlapping(buf.ptr(), res.ptr_mut(), cmp::min(len_from, len_to)); - >::assume_init(res) + res } } // All conversion from a dynamic buffer to a dynamic buffer. #[cfg(any(feature = "std", feature = "alloc"))] -impl Reallocator for DefaultAllocator { +impl Reallocator + for DefaultAllocator +{ #[inline] unsafe fn reallocate_copy( rto: Dynamic, @@ -526,7 +317,7 @@ impl Reallocator for D } #[cfg(any(feature = "std", feature = "alloc"))] -impl Reallocator +impl Reallocator for DefaultAllocator { #[inline] @@ -541,7 +332,7 @@ impl Reallocator } #[cfg(any(feature = "std", feature = "alloc"))] -impl Reallocator +impl Reallocator for DefaultAllocator { #[inline] @@ -556,7 +347,7 @@ impl Reallocator } #[cfg(any(feature = "std", feature = "alloc"))] -impl Reallocator +impl Reallocator for DefaultAllocator { #[inline] diff --git a/src/base/dimension.rs b/src/base/dimension.rs index cfe66c87..8573dd59 100644 --- a/src/base/dimension.rs +++ b/src/base/dimension.rs @@ -2,7 +2,7 @@ //! Traits and tags for identifying the dimension of all algebraic entities. 
-use std::any::TypeId; +use std::any::{Any, TypeId}; use std::cmp; use std::fmt::Debug; use std::ops::{Add, Div, Mul, Sub}; @@ -11,8 +11,8 @@ use typenum::{self, Diff, Max, Maximum, Min, Minimum, Prod, Quot, Sum, Unsigned} #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Deserializer, Serialize, Serializer}; -/// Stores the dimension of dynamically-sized algebraic entities. -#[derive(Clone, Copy, Default, Eq, PartialEq, Debug)] +/// Dim of dynamically-sized algebraic entities. +#[derive(Clone, Copy, Eq, PartialEq, Debug)] pub struct Dynamic { value: usize, } @@ -55,7 +55,7 @@ impl IsNotStaticOne for Dynamic {} /// Trait implemented by any type that can be used as a dimension. This includes type-level /// integers and `Dynamic` (for dimensions not known at compile-time). -pub trait Dim: 'static + Debug + Copy + Default + PartialEq + Send + Sync { +pub trait Dim: Any + Debug + Copy + PartialEq + Send + Sync { #[inline(always)] fn is() -> bool { TypeId::of::() == TypeId::of::() @@ -65,16 +65,6 @@ pub trait Dim: 'static + Debug + Copy + Default + PartialEq + Send + Sync { /// Dynamic`. fn try_to_usize() -> Option; - /// Returns whether `Self` has a known compile-time value. - fn is_static() -> bool { - Self::try_to_usize().is_some() - } - - /// Returns whether `Self` does not have a known compile-time value. - fn is_dynamic() -> bool { - Self::try_to_usize().is_none() - } - /// Gets the run-time value of `self`. For type-level integers, this is the same as /// `Self::try_to_usize().unwrap()`. fn value(&self) -> usize; @@ -206,10 +196,7 @@ dim_ops!( DimMax, DimNameMax, Max, max, cmp::max, DimMaximum, DimNameMaximum, Maximum; ); -/// A wrapper around const types, which provides the capability of performing -/// type-level arithmetic. This might get removed if const-generics become -/// more powerful in the future. -#[derive(Debug, Copy, Clone, Default, PartialEq, Eq, Hash)] +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] pub struct Const; /// Trait implemented exclusively by type-level integers. 
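The `Dim` rework above trades the `'static + Default` bounds for `Any` and drops the `is_static`/`is_dynamic` helpers, while `try_to_usize` and the `TypeId`-based `is()` check keep working. As a quick aside (not part of the patch), a minimal sketch of how the two kinds of dimensions behave under this trait; the `describe` helper is hypothetical:

```rust
use nalgebra::{Const, Dim, Dynamic};

// `Const<N>` knows its value at compile time; `Dynamic` only at run time.
fn describe<D: Dim>(d: D) -> String {
    match D::try_to_usize() {
        Some(n) => format!("static dimension {}", n),
        None => format!("dynamic dimension {}", d.value()),
    }
}

fn main() {
    assert_eq!(describe(Const::<3>), "static dimension 3");
    assert_eq!(describe(Dynamic::new(7)), "dynamic dimension 7");
}
```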
diff --git a/src/base/edition.rs b/src/base/edition.rs index 94c13b09..0cad0d29 100644 --- a/src/base/edition.rs +++ b/src/base/edition.rs @@ -2,9 +2,6 @@ use num::{One, Zero}; use std::cmp; #[cfg(any(feature = "std", feature = "alloc"))] use std::iter::ExactSizeIterator; -#[cfg(any(feature = "std", feature = "alloc"))] -use std::mem; -use std::mem::MaybeUninit; use std::ptr; use crate::base::allocator::{Allocator, Reallocator}; @@ -12,8 +9,10 @@ use crate::base::constraint::{DimEq, SameNumberOfColumns, SameNumberOfRows, Shap #[cfg(any(feature = "std", feature = "alloc"))] use crate::base::dimension::Dynamic; use crate::base::dimension::{Const, Dim, DimAdd, DimDiff, DimMin, DimMinimum, DimSub, DimSum, U1}; -use crate::base::storage::{ContiguousStorageMut, ReshapableStorage, Storage, StorageMut}; +use crate::base::storage::{RawStorage, RawStorageMut, ReshapableStorage}; use crate::base::{DefaultAllocator, Matrix, OMatrix, RowVector, Scalar, Vector}; +use crate::Storage; +use std::mem::MaybeUninit; /// # Rows and columns extraction impl> Matrix { @@ -50,11 +49,11 @@ impl> Matrix { where I: IntoIterator, I::IntoIter: ExactSizeIterator + Clone, + DefaultAllocator: Allocator, { let irows = irows.into_iter(); - let ncols = self.data.shape().1; - let mut res = - OMatrix::::new_uninitialized_generic(Dynamic::new(irows.len()), ncols); + let ncols = self.shape_generic().1; + let mut res = Matrix::uninit(Dynamic::new(irows.len()), ncols); // First, check that all the indices from irows are valid. // This will allow us to use unchecked access in the inner loop. @@ -68,13 +67,15 @@ impl> Matrix { let src = self.column(j); for (destination, source) in irows.clone().enumerate() { + // Safety: all indices are in range. unsafe { *res.vget_unchecked_mut(destination) = - MaybeUninit::new(src.vget_unchecked(*source).clone()); + MaybeUninit::new(src.vget_unchecked(*source).inlined_clone()); } } } + // Safety: res is now fully initialized. unsafe { res.assume_init() } } @@ -88,32 +89,30 @@ impl> Matrix { DefaultAllocator: Allocator, { let icols = icols.into_iter(); - let nrows = self.data.shape().0; - let mut res = Matrix::new_uninitialized_generic(nrows, Dynamic::new(icols.len())); + let nrows = self.shape_generic().0; + let mut res = Matrix::uninit(nrows, Dynamic::new(icols.len())); for (destination, source) in icols.enumerate() { - for (d, s) in res - .column_mut(destination) - .iter_mut() - .zip(self.column(*source).iter()) - { - *d = MaybeUninit::new(s.clone()); - } + // NOTE: this is basically a copy_from but wrapping the values inside of MaybeUninit. + res.column_mut(destination) + .zip_apply(&self.column(*source), |out, e| { + *out = MaybeUninit::new(e.inlined_clone()) + }); } + // Safety: res is now fully initialized. unsafe { res.assume_init() } } } /// # Set rows, columns, and diagonal -impl> Matrix { +impl> Matrix { /// Fills the diagonal of this matrix with the content of the given vector.
#[inline] pub fn set_diagonal(&mut self, diag: &Vector) where - T: Clone, R: DimMin, - S2: Storage, + S2: RawStorage, ShapeConstraint: DimEq, R2>, { let (nrows, ncols) = self.shape(); @@ -121,7 +120,7 @@ impl> Matrix { assert_eq!(diag.len(), min_nrows_ncols, "Mismatched dimensions."); for i in 0..min_nrows_ncols { - unsafe { *self.get_unchecked_mut((i, i)) = diag.vget_unchecked(i).clone() } + unsafe { *self.get_unchecked_mut((i, i)) = diag.vget_unchecked(i).inlined_clone() } } } @@ -144,8 +143,7 @@ impl> Matrix { #[inline] pub fn set_row(&mut self, i: usize, row: &RowVector) where - T: Clone, - S2: Storage, + S2: RawStorage, ShapeConstraint: SameNumberOfColumns, { self.row_mut(i).copy_from(row); @@ -155,8 +153,7 @@ impl> Matrix { #[inline] pub fn set_column(&mut self, i: usize, column: &Vector) where - T: Clone, - S2: Storage, + S2: RawStorage, ShapeConstraint: SameNumberOfRows, { self.column_mut(i).copy_from(column); @@ -164,23 +161,23 @@ impl> Matrix { } /// # In-place filling -impl> Matrix { +impl> Matrix { + /// Sets all the elements of this matrix to the value returned by the closure. + #[inline] + pub fn fill_with(&mut self, val: impl Fn() -> T) { + for e in self.iter_mut() { + *e = val() + } + } + /// Sets all the elements of this matrix to `val`. #[inline] pub fn fill(&mut self, val: T) where - T: Clone, + T: Scalar, { for e in self.iter_mut() { - *e = val.clone() - } - } - - /// Sets all the elements of this matrix to `f()`. - #[inline] - pub fn fill_fn T>(&mut self, mut f: F) { - for e in self.iter_mut() { - *e = f(); + *e = val.inlined_clone() } } @@ -188,7 +185,7 @@ impl> Matrix { #[inline] pub fn fill_with_identity(&mut self) where - T: Zero + One + Scalar, + T: Scalar + Zero + One, { self.fill(T::zero()); self.fill_diagonal(T::one()); @@ -198,13 +195,13 @@ impl> Matrix { #[inline] pub fn fill_diagonal(&mut self, val: T) where - T: Clone, + T: Scalar, { let (nrows, ncols) = self.shape(); let n = cmp::min(nrows, ncols); for i in 0..n { - unsafe { *self.get_unchecked_mut((i, i)) = val.clone() } + unsafe { *self.get_unchecked_mut((i, i)) = val.inlined_clone() } } } @@ -212,11 +209,11 @@ impl> Matrix { #[inline] pub fn fill_row(&mut self, i: usize, val: T) where - T: Clone, + T: Scalar, { assert!(i < self.nrows(), "Row index out of bounds."); for j in 0..self.ncols() { - unsafe { *self.get_unchecked_mut((i, j)) = val.clone() } + unsafe { *self.get_unchecked_mut((i, j)) = val.inlined_clone() } } } @@ -224,11 +221,11 @@ impl> Matrix { #[inline] pub fn fill_column(&mut self, j: usize, val: T) where - T: Clone, + T: Scalar, { assert!(j < self.ncols(), "Column index out of bounds."); for i in 0..self.nrows() { - unsafe { *self.get_unchecked_mut((i, j)) = val.clone() } + unsafe { *self.get_unchecked_mut((i, j)) = val.inlined_clone() } } } @@ -242,11 +239,11 @@ impl> Matrix { #[inline] pub fn fill_lower_triangle(&mut self, val: T, shift: usize) where - T: Clone, + T: Scalar, { for j in 0..self.ncols() { for i in (j + shift)..self.nrows() { - unsafe { *self.get_unchecked_mut((i, j)) = val.clone() } + unsafe { *self.get_unchecked_mut((i, j)) = val.inlined_clone() } } } } @@ -261,19 +258,19 @@ impl> Matrix { #[inline] pub fn fill_upper_triangle(&mut self, val: T, shift: usize) where - T: Clone, + T: Scalar, { for j in shift..self.ncols() { // TODO: is there a more efficient way to avoid the min?
// (necessary for rectangular matrices) for i in 0..cmp::min(j + 1 - shift, self.nrows()) { - unsafe { *self.get_unchecked_mut((i, j)) = val.clone() } + unsafe { *self.get_unchecked_mut((i, j)) = val.inlined_clone() } } } } } -impl> Matrix { +impl> Matrix { /// Copies the upper-triangle of this matrix to its lower-triangular part. /// /// This makes the matrix symmetric. Panics if the matrix is not square. @@ -284,7 +281,7 @@ impl> Matrix { for j in 0..dim { for i in j + 1..dim { unsafe { - *self.get_unchecked_mut((i, j)) = self.get_unchecked((j, i)).clone(); + *self.get_unchecked_mut((i, j)) = self.get_unchecked((j, i)).inlined_clone(); } } } @@ -299,7 +296,7 @@ impl> Matrix { for j in 1..self.ncols() { for i in 0..j { unsafe { - *self.get_unchecked_mut((i, j)) = self.get_unchecked((j, i)).clone(); + *self.get_unchecked_mut((i, j)) = self.get_unchecked((j, i)).inlined_clone(); } } } @@ -307,7 +304,7 @@ impl> Matrix { } /// # In-place swapping -impl> Matrix { +impl> Matrix { /// Swaps two rows in-place. #[inline] pub fn swap_rows(&mut self, irow1: usize, irow2: usize) { @@ -343,7 +340,7 @@ impl> Matrix { * */ /// # Rows and columns removal -impl> Matrix { +impl> Matrix { /* * * Column removal. @@ -367,7 +364,7 @@ impl> Matrix { DefaultAllocator: Reallocator, { let mut m = self.into_owned(); - let (nrows, ncols) = m.data.shape(); + let (nrows, ncols) = m.shape_generic(); let mut offset: usize = 0; let mut target: usize = 0; while offset + target < ncols.value() { @@ -401,7 +398,7 @@ impl> Matrix { DefaultAllocator: Reallocator, { let mut m = self.into_owned(); - let (nrows, ncols) = m.data.shape(); + let (nrows, ncols) = m.shape_generic(); let mut offset: usize = 0; let mut target: usize = 0; while offset + target < nrows.value() * ncols.value() { @@ -464,7 +461,7 @@ impl> Matrix { DefaultAllocator: Reallocator>, { let mut m = self.into_owned(); - let (nrows, ncols) = m.data.shape(); + let (nrows, ncols) = m.shape_generic(); assert!( i + nremove.value() <= ncols.value(), "Column index out of range." @@ -543,7 +540,7 @@ impl> Matrix { DefaultAllocator: Reallocator, C>, { let mut m = self.into_owned(); - let (nrows, ncols) = m.data.shape(); + let (nrows, ncols) = m.shape_generic(); assert!( i + nremove.value() <= nrows.value(), "Row index out of range." @@ -552,7 +549,7 @@ impl> Matrix { if nremove.value() != 0 { unsafe { compress_rows( - &mut m.data.as_mut_slice(), + &mut m.as_mut_slice(), nrows.value(), ncols.value(), i, @@ -572,7 +569,7 @@ impl> Matrix { } /// # Rows and columns insertion -impl> Matrix { +impl> Matrix { /* * * Columns insertion. @@ -633,7 +630,7 @@ impl> Matrix { DefaultAllocator: Reallocator>, { let m = self.into_owned(); - let (nrows, ncols) = m.data.shape(); + let (nrows, ncols) = m.shape_generic(); let mut res = Matrix::from_data(DefaultAllocator::reallocate_copy( nrows, ncols.add(ninsert), @@ -717,7 +714,7 @@ impl> Matrix { DefaultAllocator: Reallocator, C>, { let m = self.into_owned(); - let (nrows, ncols) = m.data.shape(); + let (nrows, ncols) = m.shape_generic(); let mut res = Matrix::from_data(DefaultAllocator::reallocate_copy( nrows.add(ninsert), ncols, @@ -728,7 +725,7 @@ impl> Matrix { if ninsert.value() != 0 { extend_rows( - &mut res.data.as_mut_slice(), + &mut res.as_mut_slice(), nrows.value(), ncols.value(), i, @@ -741,7 +738,7 @@ impl> Matrix { } /// # Resizing and reshaping -impl> Matrix { +impl> Matrix { /// Resizes this matrix so that it contains `new_nrows` rows and `new_ncols` columns. 
/// /// The values are copied such that `self[(i, j)] == result[(i, j)]`. If the result has more @@ -763,7 +760,7 @@ impl> Matrix { where DefaultAllocator: Reallocator, { - let ncols = self.data.shape().1; + let ncols = self.shape_generic().1; self.resize_generic(Dynamic::new(new_nrows), ncols, val) } @@ -776,7 +773,7 @@ impl> Matrix { where DefaultAllocator: Reallocator, { - let nrows = self.data.shape().0; + let nrows = self.shape_generic().0; self.resize_generic(nrows, Dynamic::new(new_ncols), val) } @@ -809,10 +806,10 @@ impl> Matrix { DefaultAllocator: Reallocator, { let (nrows, ncols) = self.shape(); - let mut data = self.data.into_owned(); + let mut data = self.into_owned(); if new_nrows.value() == nrows { - let res = unsafe { DefaultAllocator::reallocate_copy(new_nrows, new_ncols, data.0) }; + let res = unsafe { DefaultAllocator::reallocate_copy(new_nrows, new_ncols, data.data) }; let mut res = Matrix::from_data(res); if new_ncols.value() > ncols { res.columns_range_mut(ncols..).fill(val); @@ -832,14 +829,14 @@ impl> Matrix { nrows - new_nrows.value(), ); res = Matrix::from_data(DefaultAllocator::reallocate_copy( - new_nrows, new_ncols, data.0, + new_nrows, new_ncols, data.data, )); } else { res = Matrix::from_data(DefaultAllocator::reallocate_copy( - new_nrows, new_ncols, data.0, + new_nrows, new_ncols, data.data, )); extend_rows( - &mut res.data.as_mut_slice(), + &mut res.as_mut_slice(), nrows, new_ncols.value(), nrows, @@ -849,7 +846,7 @@ impl> Matrix { } if new_ncols.value() > ncols { - res.columns_range_mut(ncols..).fill(val.clone()); + res.columns_range_mut(ncols..).fill(val.inlined_clone()); } if new_nrows.value() > nrows { @@ -931,7 +928,7 @@ impl> Matrix { /// # In-place resizing #[cfg(any(feature = "std", feature = "alloc"))] -impl OMatrix { +impl OMatrix { /// Resizes this matrix in-place. /// /// The values are copied such that `self[(i, j)] == result[(i, j)]`. If the result has more @@ -942,20 +939,13 @@ impl OMatrix { where DefaultAllocator: Reallocator, { - // IMPORTANT TODO: this method is still UB, and we should decide how to - // update the API to take it into account. - - let placeholder = unsafe { - Matrix::new_uninitialized_generic(Dynamic::new(0), Dynamic::new(0)).assume_init() - }; - let old = mem::replace(self, placeholder); - let new = old.resize(new_nrows, new_ncols, val); - let _ = mem::replace(self, new); + // TODO: avoid the clone. + *self = self.clone().resize(new_nrows, new_ncols, val); } } #[cfg(any(feature = "std", feature = "alloc"))] -impl OMatrix +impl OMatrix where DefaultAllocator: Allocator, { @@ -970,16 +960,13 @@ where where DefaultAllocator: Reallocator, { - let placeholder = - Matrix::from_fn_generic(Dynamic::new(0), self.data.shape().1, |_, _| val.clone()); - let old = mem::replace(self, placeholder); - let new = old.resize_vertically(new_nrows, val); - let _ = mem::replace(self, new); + // TODO: avoid the clone. + *self = self.clone().resize_vertically(new_nrows, val); } } #[cfg(any(feature = "std", feature = "alloc"))] -impl OMatrix +impl OMatrix where DefaultAllocator: Allocator, { @@ -994,15 +981,18 @@ where where DefaultAllocator: Reallocator, { - let placeholder = - Matrix::from_fn_generic(self.data.shape().0, Dynamic::new(0), |_, _| val.clone()); - let old = mem::replace(self, placeholder); - let new = old.resize_horizontally(new_ncols, val); - let _ = mem::replace(self, new); + // TODO: avoid the clone. 
+ *self = self.clone().resize_horizontally(new_ncols, val); } } -unsafe fn compress_rows(data: &mut [T], nrows: usize, ncols: usize, i: usize, nremove: usize) { +unsafe fn compress_rows( + data: &mut [T], + nrows: usize, + ncols: usize, + i: usize, + nremove: usize, +) { let new_nrows = nrows - nremove; if new_nrows == 0 || ncols == 0 { @@ -1035,7 +1025,13 @@ unsafe fn compress_rows(data: &mut [T], nrows: usize, ncols: usize, i: usize, // Moves entries of a matrix buffer to make room for `ninsert` empty rows starting at the `i-th` row index. // The `data` buffer is assumed to contain at least `(nrows + ninsert) * ncols` elements. -unsafe fn extend_rows(data: &mut [T], nrows: usize, ncols: usize, i: usize, ninsert: usize) { +unsafe fn extend_rows( + data: &mut [T], + nrows: usize, + ncols: usize, + i: usize, + ninsert: usize, +) { let new_nrows = nrows + ninsert; if new_nrows == 0 || ncols == 0 { @@ -1065,7 +1061,12 @@ unsafe fn extend_rows(data: &mut [T], nrows: usize, ncols: usize, i: usize, n /// Extend the number of columns of the `Matrix` with elements from /// a given iterator. #[cfg(any(feature = "std", feature = "alloc"))] -impl> Extend for Matrix { +impl Extend for Matrix +where + T: Scalar, + R: Dim, + S: Extend, +{ /// Extend the number of columns of the `Matrix` with elements /// from the given iterator. /// @@ -1110,6 +1111,7 @@ impl> Extend for Matrix { #[cfg(any(feature = "std", feature = "alloc"))] impl Extend for Matrix where + T: Scalar, S: Extend, { /// Extend the number of rows of a `Vector` with elements /// from the given iterator. @@ -1128,10 +1130,13 @@ where } #[cfg(any(feature = "std", feature = "alloc"))] -impl Extend> for Matrix +impl Extend> for Matrix where + T: Scalar, + R: Dim, S: Extend>, - SV: Storage, + RV: Dim, + SV: RawStorage, ShapeConstraint: SameNumberOfRows, { /// Extends the number of columns of a `Matrix` with `Vector`s diff --git a/src/base/indexing.rs b/src/base/indexing.rs index bb0adddb..93f41ed3 100644 --- a/src/base/indexing.rs +++ b/src/base/indexing.rs @@ -1,8 +1,8 @@ //! Indexing -use crate::base::storage::{Storage, StorageMut}; +use crate::base::storage::{RawStorage, RawStorageMut}; use crate::base::{ - Const, Dim, DimDiff, DimName, DimSub, Dynamic, Matrix, MatrixSlice, MatrixSliceMut, U1, + Const, Dim, DimDiff, DimName, DimSub, Dynamic, Matrix, MatrixSlice, MatrixSliceMut, Scalar, U1, }; use std::ops; @@ -310,7 +310,7 @@ fn dimrange_rangetoinclusive_usize() { } /// A helper trait used for indexing operations. -pub trait MatrixIndex<'a, T, R: Dim, C: Dim, S: Storage>: Sized { /// The output type returned by methods. type Output: 'a; @@ -345,7 +345,7 @@ pub trait MatrixIndex<'a, T, R: Dim, C: Dim, S: Storage>: Sized { } /// A helper trait used for indexing operations. -pub trait MatrixIndexMut<'a, T, R: Dim, C: Dim, S: StorageMut>: MatrixIndex<'a, T, R, C, S> { /// The output type returned by methods. @@ -476,7 +476,7 @@ pub trait MatrixIndexMut<'a, T, R: Dim, C: Dim, S: StorageMut>: /// 4, 7, /// 5, 8))); /// ``` -impl> Matrix { +impl> Matrix { /// Produces a view of the data at the given index, or /// `None` if the index is out of bounds.
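Since the indexing traits above are the backbone of all element access, here is a short usage sketch (not part of the patch; the accessor definitions continue below) of the checked-indexing API whose storage bounds this hunk relaxes to `RawStorage`/`RawStorageMut`:

```rust
use nalgebra::Matrix2;

fn main() {
    let mut m = Matrix2::new(1, 2,
                             3, 4);

    // `get` returns `None` instead of panicking on out-of-bounds indices.
    assert_eq!(m.get((1, 1)), Some(&4));
    assert_eq!(m.get((2, 0)), None);

    // `get_mut` provides the same checked access for mutation.
    if let Some(e) = m.get_mut((0, 1)) {
        *e = 20;
    }
    assert_eq!(m[(0, 1)], 20);
}
```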
#[inline] @@ -494,7 +494,7 @@ impl> Matrix { #[must_use] pub fn get_mut<'a, I>(&'a mut self, index: I) -> Option where - S: StorageMut, + S: RawStorageMut, I: MatrixIndexMut<'a, T, R, C, S>, { index.get_mut(self) @@ -516,7 +516,7 @@ impl> Matrix { #[inline] pub fn index_mut<'a, I>(&'a mut self, index: I) -> I::OutputMut where - S: StorageMut, + S: RawStorageMut, I: MatrixIndexMut<'a, T, R, C, S>, { index.index_mut(self) @@ -539,7 +539,7 @@ impl> Matrix { #[must_use] pub unsafe fn get_unchecked_mut<'a, I>(&'a mut self, index: I) -> I::OutputMut where - S: StorageMut, + S: RawStorageMut, I: MatrixIndexMut<'a, T, R, C, S>, { index.get_unchecked_mut(self) @@ -548,9 +548,12 @@ impl> Matrix { // EXTRACT A SINGLE ELEMENT BY 1D LINEAR ADDRESS -impl<'a, T: 'a, R: Dim, C: Dim, S> MatrixIndex<'a, T, R, C, S> for usize +impl<'a, T, R, C, S> MatrixIndex<'a, T, R, C, S> for usize where - S: Storage, + T: Scalar, + R: Dim, + C: Dim, + S: RawStorage, { type Output = &'a T; @@ -567,9 +570,12 @@ where } } -impl<'a, T: 'a, R: Dim, C: Dim, S> MatrixIndexMut<'a, T, R, C, S> for usize +impl<'a, T, R, C, S> MatrixIndexMut<'a, T, R, C, S> for usize where - S: StorageMut, + T: Scalar, + R: Dim, + C: Dim, + S: RawStorageMut, { type OutputMut = &'a mut T; @@ -577,7 +583,7 @@ where #[inline(always)] unsafe fn get_unchecked_mut(self, matrix: &'a mut Matrix) -> Self::OutputMut where - S: StorageMut, + S: RawStorageMut, { matrix.data.get_unchecked_linear_mut(self) } @@ -585,9 +591,11 @@ where // EXTRACT A SINGLE ELEMENT BY 2D COORDINATES -impl<'a, T: 'a, R: Dim, C: Dim, S> MatrixIndex<'a, T, R, C, S> for (usize, usize) +impl<'a, T: 'a, R, C, S> MatrixIndex<'a, T, R, C, S> for (usize, usize) where - S: Storage, + R: Dim, + C: Dim, + S: RawStorage, { type Output = &'a T; @@ -595,7 +603,7 @@ where #[inline(always)] fn contained_by(&self, matrix: &Matrix) -> bool { let (rows, cols) = self; - let (nrows, ncols) = matrix.data.shape(); + let (nrows, ncols) = matrix.shape_generic(); DimRange::contained_by(rows, nrows) && DimRange::contained_by(cols, ncols) } @@ -607,9 +615,11 @@ where } } -impl<'a, T: 'a, R: Dim, C: Dim, S> MatrixIndexMut<'a, T, R, C, S> for (usize, usize) +impl<'a, T: 'a, R, C, S> MatrixIndexMut<'a, T, R, C, S> for (usize, usize) where - S: StorageMut, + R: Dim, + C: Dim, + S: RawStorageMut, { type OutputMut = &'a mut T; @@ -617,7 +627,7 @@ where #[inline(always)] unsafe fn get_unchecked_mut(self, matrix: &'a mut Matrix) -> Self::OutputMut where - S: StorageMut, + S: RawStorageMut, { let (row, col) = self; matrix.data.get_unchecked_mut(row, col) @@ -643,10 +653,12 @@ macro_rules! impl_index_pair { $(where $CConstraintType: ty: $CConstraintBound: ident $(<$($CConstraintBoundParams: ty $( = $CEqBound: ty )*),*>)* )*] ) => { - impl<'a, T: 'a, $R: Dim, $C: Dim, S, $($RTyP : $RTyPB,)* $($CTyP : $CTyPB),*> - MatrixIndex<'a, T, $R, $C, S> for ($RIdx, $CIdx) + impl<'a, T, $R, $C, S, $($RTyP : $RTyPB,)* $($CTyP : $CTyPB),*> MatrixIndex<'a, T, $R, $C, S> for ($RIdx, $CIdx) where - S: Storage, + T: Scalar, + $R: Dim, + $C: Dim, + S: RawStorage, $( $RConstraintType: $RConstraintBound $(<$( $RConstraintBoundParams $( = $REqBound )*),*>)* ,)* $( $CConstraintType: $CConstraintBound $(<$( $CConstraintBoundParams $( = $CEqBound )*),*>)* ),* { @@ -656,7 +668,7 @@ macro_rules! 
impl_index_pair { #[inline(always)] fn contained_by(&self, matrix: &Matrix) -> bool { let (rows, cols) = self; - let (nrows, ncols) = matrix.data.shape(); + let (nrows, ncols) = matrix.shape_generic(); DimRange::contained_by(rows, nrows) && DimRange::contained_by(cols, ncols) } @@ -666,21 +678,23 @@ macro_rules! impl_index_pair { use crate::base::SliceStorage; let (rows, cols) = self; - let (nrows, ncols) = matrix.data.shape(); + let (nrows, ncols) = matrix.shape_generic(); let data = SliceStorage::new_unchecked(&matrix.data, (rows.lower(nrows), cols.lower(ncols)), (rows.length(nrows), cols.length(ncols))); - Matrix::from_data(data) + Matrix::from_data_statically_unchecked(data) } } - impl<'a, T: 'a, $R: Dim, $C: Dim, S, $($RTyP : $RTyPB,)* $($CTyP : $CTyPB),*> - MatrixIndexMut<'a, T, $R, $C, S> for ($RIdx, $CIdx) + impl<'a, T, $R, $C, S, $($RTyP : $RTyPB,)* $($CTyP : $CTyPB),*> MatrixIndexMut<'a, T, $R, $C, S> for ($RIdx, $CIdx) where - S: StorageMut, + T: Scalar, + $R: Dim, + $C: Dim, + S: RawStorageMut, $( $RConstraintType: $RConstraintBound $(<$( $RConstraintBoundParams $( = $REqBound )*),*>)* ,)* $( $CConstraintType: $CConstraintBound $(<$( $CConstraintBoundParams $( = $CEqBound )*),*>)* ),* { @@ -692,14 +706,14 @@ macro_rules! impl_index_pair { use crate::base::SliceStorageMut; let (rows, cols) = self; - let (nrows, ncols) = matrix.data.shape(); + let (nrows, ncols) = matrix.shape_generic(); let data = SliceStorageMut::new_unchecked(&mut matrix.data, (rows.lower(nrows), cols.lower(ncols)), (rows.length(nrows), cols.length(ncols))); - Matrix::from_data(data) + Matrix::from_data_statically_unchecked(data) } } } diff --git a/src/base/iter.rs b/src/base/iter.rs index b48e8322..b68e1051 100644 --- a/src/base/iter.rs +++ b/src/base/iter.rs @@ -5,12 +5,13 @@ use std::marker::PhantomData; use std::mem; use crate::base::dimension::{Dim, U1}; -use crate::base::storage::{Storage, StorageMut}; -use crate::base::{Matrix, MatrixSlice, MatrixSliceMut}; +use crate::base::storage::{RawStorage, RawStorageMut}; +use crate::base::{Matrix, MatrixSlice, MatrixSliceMut, Scalar}; macro_rules! iterator { (struct $Name:ident for $Storage:ident.$ptr: ident -> $Ptr:ty, $Ref:ty, $SRef: ty) => { /// An iterator through a dense matrix with arbitrary strides matrix. + #[derive(Debug)] pub struct $Name<'a, T, R: Dim, C: Dim, S: 'a + $Storage> { ptr: $Ptr, inner_ptr: $Ptr, @@ -170,8 +171,8 @@ macro_rules! iterator { }; } -iterator!(struct MatrixIter for Storage.ptr -> *const T, &'a T, &'a S); -iterator!(struct MatrixIterMut for StorageMut.ptr_mut -> *mut T, &'a mut T, &'a mut S); +iterator!(struct MatrixIter for RawStorage.ptr -> *const T, &'a T, &'a S); +iterator!(struct MatrixIterMut for RawStorageMut.ptr_mut -> *mut T, &'a mut T, &'a mut S); /* * @@ -180,18 +181,18 @@ iterator!(struct MatrixIterMut for StorageMut.ptr_mut -> *mut T, &'a mut T, &'a */ #[derive(Clone, Debug)] /// An iterator through the rows of a matrix. 
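The iterator types being re-bounded in this file (`RowIter`, `ColumnIter`, and their `Mut` variants, continued below) back the public `row_iter`/`column_iter` APIs; a small usage sketch (not part of the patch; values arbitrary):

```rust
use nalgebra::Matrix2x3;

fn main() {
    let m = Matrix2x3::new(1, 2, 3,
                           4, 5, 6);

    // Row and column iterators yield matrix slices.
    assert_eq!(m.row_iter().count(), 2);
    assert_eq!(m.column_iter().count(), 3);

    // The element iterator walks the matrix in column-major order.
    let elems: Vec<i32> = m.iter().copied().collect();
    assert_eq!(elems, vec![1, 4, 2, 5, 3, 6]);
}
```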
-pub struct RowIter<'a, T, R: Dim, C: Dim, S: Storage> { +pub struct RowIter<'a, T, R: Dim, C: Dim, S: RawStorage> { mat: &'a Matrix, curr: usize, } -impl<'a, T, R: Dim, C: Dim, S: 'a + Storage> RowIter<'a, T, R, C, S> { +impl<'a, T, R: Dim, C: Dim, S: 'a + RawStorage> RowIter<'a, T, R, C, S> { pub(crate) fn new(mat: &'a Matrix) -> Self { RowIter { mat, curr: 0 } } } -impl<'a, T, R: Dim, C: Dim, S: 'a + Storage> Iterator for RowIter<'a, T, R, C, S> { +impl<'a, T, R: Dim, C: Dim, S: 'a + RawStorage> Iterator for RowIter<'a, T, R, C, S> { type Item = MatrixSlice<'a, T, U1, C, S::RStride, S::CStride>; #[inline] @@ -219,7 +220,7 @@ impl<'a, T, R: Dim, C: Dim, S: 'a + Storage> Iterator for RowIter<'a, T } } -impl<'a, T, R: Dim, C: Dim, S: 'a + Storage> ExactSizeIterator +impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + RawStorage> ExactSizeIterator for RowIter<'a, T, R, C, S> { #[inline] @@ -229,13 +230,14 @@ impl<'a, T, R: Dim, C: Dim, S: 'a + Storage> ExactSizeIterator } /// An iterator through the mutable rows of a matrix. -pub struct RowIterMut<'a, T, R: Dim, C: Dim, S: StorageMut> { +#[derive(Debug)] +pub struct RowIterMut<'a, T, R: Dim, C: Dim, S: RawStorageMut> { mat: *mut Matrix, curr: usize, phantom: PhantomData<&'a mut Matrix>, } -impl<'a, T, R: Dim, C: Dim, S: 'a + StorageMut> RowIterMut<'a, T, R, C, S> { +impl<'a, T, R: Dim, C: Dim, S: 'a + RawStorageMut> RowIterMut<'a, T, R, C, S> { pub(crate) fn new(mat: &'a mut Matrix) -> Self { RowIterMut { mat, @@ -249,7 +251,9 @@ impl<'a, T, R: Dim, C: Dim, S: 'a + StorageMut> RowIterMut<'a, T, R, C, } } -impl<'a, T, R: Dim, C: Dim, S: 'a + StorageMut> Iterator for RowIterMut<'a, T, R, C, S> { +impl<'a, T, R: Dim, C: Dim, S: 'a + RawStorageMut> Iterator + for RowIterMut<'a, T, R, C, S> +{ type Item = MatrixSliceMut<'a, T, U1, C, S::RStride, S::CStride>; #[inline] @@ -274,7 +278,7 @@ impl<'a, T, R: Dim, C: Dim, S: 'a + StorageMut> Iterator for RowIterMut } } -impl<'a, T, R: Dim, C: Dim, S: 'a + StorageMut> ExactSizeIterator +impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + RawStorageMut> ExactSizeIterator for RowIterMut<'a, T, R, C, S> { #[inline] @@ -290,18 +294,18 @@ impl<'a, T, R: Dim, C: Dim, S: 'a + StorageMut> ExactSizeIterator */ #[derive(Clone, Debug)] /// An iterator through the columns of a matrix. -pub struct ColumnIter<'a, T, R: Dim, C: Dim, S: Storage> { +pub struct ColumnIter<'a, T, R: Dim, C: Dim, S: RawStorage> { mat: &'a Matrix, curr: usize, } -impl<'a, T, R: Dim, C: Dim, S: 'a + Storage> ColumnIter<'a, T, R, C, S> { +impl<'a, T, R: Dim, C: Dim, S: 'a + RawStorage> ColumnIter<'a, T, R, C, S> { pub(crate) fn new(mat: &'a Matrix) -> Self { ColumnIter { mat, curr: 0 } } } -impl<'a, T, R: Dim, C: Dim, S: 'a + Storage> Iterator for ColumnIter<'a, T, R, C, S> { +impl<'a, T, R: Dim, C: Dim, S: 'a + RawStorage> Iterator for ColumnIter<'a, T, R, C, S> { type Item = MatrixSlice<'a, T, R, U1, S::RStride, S::CStride>; #[inline] @@ -329,7 +333,7 @@ impl<'a, T, R: Dim, C: Dim, S: 'a + Storage> Iterator for ColumnIter<'a } } -impl<'a, T, R: Dim, C: Dim, S: 'a + Storage> ExactSizeIterator +impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + RawStorage> ExactSizeIterator for ColumnIter<'a, T, R, C, S> { #[inline] @@ -339,13 +343,14 @@ impl<'a, T, R: Dim, C: Dim, S: 'a + Storage> ExactSizeIterator } /// An iterator through the mutable columns of a matrix. 
-pub struct ColumnIterMut<'a, T, R: Dim, C: Dim, S: StorageMut> { +#[derive(Debug)] +pub struct ColumnIterMut<'a, T, R: Dim, C: Dim, S: RawStorageMut> { mat: *mut Matrix, curr: usize, phantom: PhantomData<&'a mut Matrix>, } -impl<'a, T, R: Dim, C: Dim, S: 'a + StorageMut> ColumnIterMut<'a, T, R, C, S> { +impl<'a, T, R: Dim, C: Dim, S: 'a + RawStorageMut> ColumnIterMut<'a, T, R, C, S> { pub(crate) fn new(mat: &'a mut Matrix) -> Self { ColumnIterMut { mat, @@ -359,7 +364,7 @@ impl<'a, T, R: Dim, C: Dim, S: 'a + StorageMut> ColumnIterMut<'a, T, R, } } -impl<'a, T, R: Dim, C: Dim, S: 'a + StorageMut> Iterator +impl<'a, T, R: Dim, C: Dim, S: 'a + RawStorageMut> Iterator for ColumnIterMut<'a, T, R, C, S> { type Item = MatrixSliceMut<'a, T, R, U1, S::RStride, S::CStride>; @@ -386,7 +391,7 @@ impl<'a, T, R: Dim, C: Dim, S: 'a + StorageMut> Iterator } } -impl<'a, T, R: Dim, C: Dim, S: 'a + StorageMut> ExactSizeIterator +impl<'a, T: Scalar, R: Dim, C: Dim, S: 'a + RawStorageMut> ExactSizeIterator for ColumnIterMut<'a, T, R, C, S> { #[inline] diff --git a/src/base/matrix.rs b/src/base/matrix.rs index 8ec78264..6cca767a 100644 --- a/src/base/matrix.rs +++ b/src/base/matrix.rs @@ -8,8 +8,7 @@ use std::cmp::Ordering; use std::fmt; use std::hash::{Hash, Hasher}; use std::marker::PhantomData; -use std::mem::{self, ManuallyDrop, MaybeUninit}; -use std::ptr; +use std::mem; #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Deserializer, Serialize, Serializer}; @@ -26,14 +25,15 @@ use crate::base::dimension::{Dim, DimAdd, DimSum, IsNotStaticOne, U1, U2, U3}; use crate::base::iter::{ ColumnIter, ColumnIterMut, MatrixIter, MatrixIterMut, RowIter, RowIterMut, }; -use crate::base::storage::{ - ContiguousStorage, ContiguousStorageMut, SameShapeStorage, Storage, StorageMut, -}; +use crate::base::storage::{Owned, RawStorage, RawStorageMut, SameShapeStorage}; use crate::base::{Const, DefaultAllocator, OMatrix, OVector, Scalar, Unit}; -use crate::{ArrayStorage, MatrixSlice, MatrixSliceMut, SMatrix, SimdComplexField}; +use crate::{ArrayStorage, SMatrix, SimdComplexField, Storage, UninitMatrix}; +use crate::storage::IsContiguous; +use crate::uninit::{Init, InitStatus, Uninit}; #[cfg(any(feature = "std", feature = "alloc"))] use crate::{DMatrix, DVector, Dynamic, VecStorage}; +use std::mem::MaybeUninit; /// A square matrix. pub type SquareMatrix = Matrix; @@ -152,8 +152,8 @@ pub type MatrixCross = /// Note that mixing `Dynamic` with type-level unsigned integers is allowed. Actually, a /// dynamically-sized column vector should be represented as a `Matrix` (given /// some concrete types for `T` and a compatible data storage type `S`). -#[repr(transparent)] -#[derive(Clone, Copy, Debug)] +#[repr(C)] +#[derive(Clone, Copy)] pub struct Matrix { /// The data storage that contains all the matrix components. Disappointed? /// @@ -187,23 +187,44 @@ pub struct Matrix { // from_data_statically_unchecked. // Note that it would probably make sense to just have // the type `Matrix`, and have `T, R, C` be associated-types - // of the `Storage` trait. However, because we don't have - // specialization, this is not possible because these `T, R, C` - // allows us to disambiguate a lot of configurations. + // of the `RawStorage` trait. However, because we don't have + // specialization, this is not possible because these `T, R, C` + // allow us to disambiguate a lot of configurations.
_phantoms: PhantomData<(T, R, C)>, } -impl Default for Matrix +impl fmt::Debug for Matrix { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { + formatter + .debug_struct("Matrix") + .field("data", &self.data) + .finish() + } +} + +impl Default for Matrix where - S: Storage + Default, + T: Scalar, + R: Dim, + C: Dim, + S: Default, { fn default() -> Self { - Matrix::from_data(Default::default()) + Matrix { + data: Default::default(), + _phantoms: PhantomData, + } } } #[cfg(feature = "serde-serialize-no-std")] -impl Serialize for Matrix { +impl Serialize for Matrix +where + T: Scalar, + R: Dim, + C: Dim, + S: Serialize, +{ fn serialize(&self, serializer: Ser) -> Result where Ser: Serializer, @@ -213,7 +234,13 @@ impl Serialize for Matrix { } #[cfg(feature = "serde-serialize-no-std")] -impl<'de, T, R: Dim, C, S: Serialize<'de>> Deserialize<'de> for Matrix { +impl<'de, T, R, C, S> Deserialize<'de> for Matrix +where + T: Scalar, + R: Dim, + C: Dim, + S: Deserialize<'de>, +{ fn deserialize(deserializer: D) -> Result where D: Deserializer<'de>, @@ -226,7 +253,7 @@ impl<'de, T, R: Dim, C, S: Serialize<'de>> Deserialize<'de> for Matrix Abomonation for Matrix { +impl Abomonation for Matrix { unsafe fn entomb(&self, writer: &mut W) -> IOResult<()> { self.data.entomb(writer) } @@ -241,7 +268,7 @@ impl Abomonation for Matrix { } #[cfg(feature = "compare")] -impl> matrixcompare_core::Matrix +impl> matrixcompare_core::Matrix for Matrix { fn rows(&self) -> usize { @@ -258,7 +285,7 @@ impl> matrixcompare_core::Matrix> matrixcompare_core::DenseAccess +impl> matrixcompare_core::DenseAccess for Matrix { fn fetch_single(&self, row: usize, col: usize) -> T { @@ -267,13 +294,15 @@ impl> matrixcompare_core::DenseAcc } #[cfg(feature = "bytemuck")] -unsafe impl> bytemuck::Zeroable for Matrix where - S: bytemuck::Zeroable +unsafe impl> bytemuck::Zeroable + for Matrix +where + S: bytemuck::Zeroable, { } #[cfg(feature = "bytemuck")] -unsafe impl> bytemuck::Pod for Matrix +unsafe impl> bytemuck::Pod for Matrix where S: bytemuck::Pod, Self: Copy, @@ -294,7 +323,7 @@ mod rkyv_impl { &self, pos: usize, resolver: Self::Resolver, - out: &mut core::mem::MaybeUninit, + out: &mut core::mem::MaybeUninit, ) { self.data.resolve( pos + offset_of!(Self::Archived, data), @@ -328,19 +357,8 @@ mod rkyv_impl { } impl Matrix { - /// Creates a new matrix with the given data without statically checking - /// that the matrix dimension matches the storage dimension. - /// - /// There's only two instances in which you should use this method instead - /// of the safe counterpart [`from_data`]: - /// - You can't get the type checker to validate your matrices, even though - /// you're **certain** that they're of the right dimensions. - /// - You want to declare a matrix in a `const` context. - /// - /// # Safety - /// If the storage dimension does not match the matrix dimension, any other - /// method called on this matrix may behave erroneously, panic, or cause - /// Undefined Behavior. + /// Creates a new matrix with the given data without statically checking that the matrix + /// dimension matches the storage dimension. #[inline(always)] pub const unsafe fn from_data_statically_unchecked(data: S) -> Matrix { Matrix { @@ -350,29 +368,50 @@ impl Matrix { } } -/// # Memory manipulation methods. -impl OMatrix -where - DefaultAllocator: Allocator, -{ - /// Allocates a matrix with the given number of rows and columns without initializing its content.
- pub fn new_uninitialized_generic(nrows: R, ncols: C) -> OMatrix, R, C> { - OMatrix::from_data( - >::allocate_uninitialized(nrows, ncols), - ) - } - - /// Converts this matrix into one whose entries need to be manually dropped. This should be - /// near zero-cost. - pub fn manually_drop(self) -> OMatrix, R, C> { - OMatrix::from_data(>::manually_drop( - self.data, - )) +impl SMatrix { + /// Creates a new statically-allocated matrix from the given [`ArrayStorage`]. + /// + /// This method exists primarily as a workaround for the fact that `from_data` can not + /// work in `const fn` contexts. + #[inline(always)] + pub const fn from_array_storage(storage: ArrayStorage) -> Self { + // This is sound because the row and column types are exactly the same as that of the + // storage, so there can be no mismatch + unsafe { Self::from_data_statically_unchecked(storage) } } } -/// # More memory manipulation methods. -impl OMatrix, R, C> +// TODO: Consider removing/deprecating `from_vec_storage` once we are able to make +// `from_data` const fn compatible +#[cfg(any(feature = "std", feature = "alloc"))] +impl DMatrix { + /// Creates a new heap-allocated matrix from the given [`VecStorage`]. + /// + /// This method exists primarily as a workaround for the fact that `from_data` can not + /// work in `const fn` contexts. + pub const fn from_vec_storage(storage: VecStorage) -> Self { + // This is sound because the dimensions of the matrix and the storage are guaranteed + // to be the same + unsafe { Self::from_data_statically_unchecked(storage) } + } +} + +// TODO: Consider removing/deprecating `from_vec_storage` once we are able to make +// `from_data` const fn compatible +#[cfg(any(feature = "std", feature = "alloc"))] +impl DVector { + /// Creates a new heap-allocated matrix from the given [`VecStorage`]. + /// + /// This method exists primarily as a workaround for the fact that `from_data` can not + /// work in `const fn` contexts. + pub const fn from_vec_storage(storage: VecStorage) -> Self { + // This is sound because the dimensions of the matrix and the storage are guaranteed + // to be the same + unsafe { Self::from_data_statically_unchecked(storage) } + } +} + +impl UninitMatrix where DefaultAllocator: Allocator, { @@ -388,100 +427,29 @@ where self.data, )) } - - /// Assumes a matrix's entries to be initialized, and drops them in place. - /// This allows the buffer to be safely reused. - /// - /// # Safety - /// All of the matrix's entries need to be uninitialized. Otherwise, - /// Undefined Behavior will be triggered. - pub unsafe fn reinitialize(&mut self) { - for i in 0..self.nrows() { - for j in 0..self.ncols() { - ptr::drop_in_place(self.get_unchecked_mut((i, j))); - } - } - } } -impl Matrix, R, C, S> { - /// Creates a full slice from `self` and assumes it to be initialized. - /// - /// # Safety - /// The user must make sure that every single entry of the buffer has been initialized, - /// or Undefined Behavior will immediately occur. - pub unsafe fn assume_init_ref(&self) -> MatrixSlice - where - S: Storage, R, C>, - { - self.full_slice().slice_assume_init() - } - - /// Creates a full mutable slice from `self` and assumes it to be initialized. - /// - /// # Safety - /// The user must make sure that every single entry of the buffer has been initialized, - /// or Undefined Behavior will immediately occur. 
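That contract (every entry must be initialized before use, or Undefined Behavior results) is exactly what the new `Matrix::uninit`/`assume_init` workflow used throughout this patch relies on. A minimal sketch, assuming the `uninit` API as it appears at the call sites in this patch; the `filled` helper is hypothetical:

```rust
use nalgebra::{DMatrix, Dynamic, Matrix};
use std::mem::MaybeUninit;

// Allocate an uninitialized buffer, fill every entry, then assume_init.
fn filled(nrows: usize, ncols: usize) -> DMatrix<f64> {
    let mut res = Matrix::uninit(Dynamic::new(nrows), Dynamic::new(ncols));

    for j in 0..ncols {
        for i in 0..nrows {
            // Safety: (i, j) is in range by construction of the loops.
            unsafe {
                *res.get_unchecked_mut((i, j)) = MaybeUninit::new((i + j) as f64);
            }
        }
    }

    // Safety: every entry was initialized by the loops above.
    unsafe { res.assume_init() }
}

fn main() {
    let m = filled(2, 3);
    assert_eq!(m[(1, 2)], 3.0);
}
```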
- pub unsafe fn assume_init_mut(&mut self) -> MatrixSliceMut - where - S: StorageMut, R, C>, - { - self.full_slice_mut().slice_assume_init() - } -} - -impl SMatrix { - /// Creates a new statically-allocated matrix from the given [`ArrayStorage`]. - /// - /// This method exists primarily as a workaround for the fact that `from_data` can not - /// work in `const fn` contexts. - #[inline(always)] - pub const fn from_array_storage(storage: ArrayStorage) -> Self { - // Safety: This is sound because the row and column types are exactly - // the same as that of the storage, so there can be no mismatch. - unsafe { Self::from_data_statically_unchecked(storage) } - } -} - -// TODO: Consider removing/deprecating `from_vec_storage` once we are able to make -// `from_data` const fn compatible -#[cfg(any(feature = "std", feature = "alloc"))] -impl DMatrix { - /// Creates a new heap-allocated matrix from the given [`VecStorage`]. - /// - /// This method exists primarily as a workaround for the fact that `from_data` can not - /// work in `const fn` contexts. - pub const fn from_vec_storage(storage: VecStorage) -> Self { - // Safety: This is sound because the dimensions of the matrix and the - // storage are guaranteed to be the same. - unsafe { Self::from_data_statically_unchecked(storage) } - } -} - -// TODO: Consider removing/deprecating `from_vec_storage` once we are able to make -// `from_data` const fn compatible -#[cfg(any(feature = "std", feature = "alloc"))] -impl DVector { - /// Creates a new heap-allocated matrix from the given [`VecStorage`]. - /// - /// This method exists primarily as a workaround for the fact that `from_data` can not - /// work in `const fn` contexts. - pub const fn from_vec_storage(storage: VecStorage) -> Self { - // Safety: This is sound because the dimensions of the matrix and the - // storage are guaranteed to be the same. - unsafe { Self::from_data_statically_unchecked(storage) } - } -} - -impl> Matrix { +impl> Matrix { /// Creates a new matrix with the given data. #[inline(always)] pub fn from_data(data: S) -> Self { - // Safety: This is sound because the dimensions of the matrix and the - // storage are guaranteed to be the same. unsafe { Self::from_data_statically_unchecked(data) } } + /// Creates a new uninitialized matrix with the given uninitialized data + pub unsafe fn from_uninitialized_data(data: MaybeUninit) -> MaybeUninit { + let res: Matrix> = Matrix { + data, + _phantoms: PhantomData, + }; + let res: MaybeUninit>> = MaybeUninit::new(res); + // safety: since we wrap the inner MaybeUninit in an outer MaybeUninit above, the fact that the `data` field is partially-uninitialized is still opaque. + // with s/transmute_copy/transmute/, rustc claims that `MaybeUninit>>` may be of a different size from `MaybeUninit>` + // but MaybeUninit's documentation says "MaybeUninit is guaranteed to have the same size, alignment, and ABI as T", which implies those types should be the same size + let res: MaybeUninit> = mem::transmute_copy(&res); + res + } + /// The shape of this matrix returned as the tuple (number of rows, number of columns). /// /// # Examples: @@ -493,10 +461,16 @@ impl> Matrix { #[inline] #[must_use] pub fn shape(&self) -> (usize, usize) { - let (nrows, ncols) = self.data.shape(); + let (nrows, ncols) = self.shape_generic(); (nrows.value(), ncols.value()) } + #[inline] + #[must_use] + pub fn shape_generic(&self) -> (R, C) { + self.data.shape() + } + /// The number of rows of this matrix. 
/// /// # Examples: @@ -535,7 +509,7 @@ impl> Matrix { /// let slice = mat.slice_with_steps((0, 0), (5, 3), (1, 2)); /// // The column strides is the number of steps (here 2) multiplied by the corresponding dimension. /// assert_eq!(mat.strides(), (1, 10)); /// ``` #[inline] #[must_use] pub fn strides(&self) -> (usize, usize) { @@ -595,7 +568,7 @@ impl> Matrix { /// See `relative_eq` from the `RelativeEq` trait for more details. #[inline] #[must_use] - pub fn relative_eq( + pub fn relative_eq( &self, other: &Matrix, eps: T::Epsilon, @@ -603,6 +576,8 @@ impl> Matrix { ) -> bool where T: RelativeEq, + R2: Dim, + C2: Dim, SB: Storage, T::Epsilon: Copy, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, @@ -617,10 +592,12 @@ impl> Matrix { #[inline] #[must_use] #[allow(clippy::should_implement_trait)] - pub fn eq(&self, other: &Matrix) -> bool + pub fn eq(&self, other: &Matrix) -> bool where T: PartialEq, - SB: Storage, + R2: Dim, + C2: Dim, + SB: RawStorage, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { assert!(self.shape() == other.shape()); @@ -631,10 +608,11 @@ impl> Matrix { #[inline] pub fn into_owned(self) -> OMatrix where - T: Clone, + T: Scalar, + S: Storage, DefaultAllocator: Allocator, { - Matrix::from_data(self.data.into_owned().0) + Matrix::from_data(self.data.into_owned()) } // TODO: this could probably benefit from specialization. @@ -642,24 +620,24 @@ impl> Matrix { /// Moves this matrix into one that owns its data. The actual type of the result depends on /// matrix storage combination rules for addition. #[inline] - pub fn into_owned_sum(self) -> MatrixSum + pub fn into_owned_sum(self) -> MatrixSum where - T: Clone, + T: Scalar, + S: Storage, + R2: Dim, + C2: Dim, DefaultAllocator: SameShapeAllocator, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { - // If both storages are the same, we can just return `self.into_owned()`. - // Unfortunately, it's not trivial to convince the compiler of this. - if TypeId::of::>() == TypeId::of::() - && TypeId::of::>() == TypeId::of::() - { - // Safety: we're transmuting from a type into itself, and we make - // sure not to leak anything. + if TypeId::of::>() == TypeId::of::>() { + // We can just return `self.into_owned()`. + unsafe { - let mat = self.into_owned(); - let mat_copy = mem::transmute_copy(&mat); - mem::forget(mat); - mat_copy + // TODO: check that those copies are optimized away by the compiler. + let owned = self.into_owned(); + let res = mem::transmute_copy(&owned); + mem::forget(owned); + res } } else { self.clone_owned_sum() @@ -671,19 +649,23 @@ impl> Matrix { #[must_use] pub fn clone_owned(&self) -> OMatrix where - T: Clone, + T: Scalar, + S: Storage, DefaultAllocator: Allocator, { - Matrix::from_data(self.data.clone_owned().0) + Matrix::from_data(self.data.clone_owned()) } /// Clones this matrix into one that owns its data. The actual type of the result depends on /// matrix storage combination rules for addition.
#[inline] #[must_use] - pub fn clone_owned_sum(&self) -> MatrixSum + pub fn clone_owned_sum(&self) -> MatrixSum where - T: Clone, + T: Scalar, + S: Storage, + R2: Dim, + C2: Dim, DefaultAllocator: SameShapeAllocator, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { @@ -691,122 +673,110 @@ impl> Matrix { let nrows: SameShapeR = Dim::from_usize(nrows); let ncols: SameShapeC = Dim::from_usize(ncols); - let mut res = Matrix::new_uninitialized_generic(nrows, ncols); + let mut res = Matrix::uninit(nrows, ncols); - // TODO: use copy_from - for j in 0..res.ncols() { - for i in 0..res.nrows() { - unsafe { + unsafe { + // TODO: use copy_from? + for j in 0..res.ncols() { + for i in 0..res.nrows() { *res.get_unchecked_mut((i, j)) = - MaybeUninit::new(self.get_unchecked((i, j)).clone()); + MaybeUninit::new(self.get_unchecked((i, j)).inlined_clone()); } } - } - unsafe { res.assume_init() } + // SAFETY: the output has been initialized above. + res.assume_init() + } } - /// Transposes `self` and store the result into `out`, which will become - /// fully initialized. + /// Transposes `self` and store the result into `out`. #[inline] - pub fn transpose_to(&self, out: &mut Matrix, R2, C2, SB>) - where - T: Clone, - SB: StorageMut, R2, C2>, + fn transpose_to_uninit( + &self, + status: Status, + out: &mut Matrix, + ) where + Status: InitStatus, + T: Scalar, + R2: Dim, + C2: Dim, + SB: RawStorageMut, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { let (nrows, ncols) = self.shape(); assert!( (ncols, nrows) == out.shape(), - "Incompatible shape for transpose-copy." + "Incompatible shape for transposition." ); // TODO: optimize that. for i in 0..nrows { for j in 0..ncols { + // Safety: the indices are in range. unsafe { - *out.get_unchecked_mut((j, i)) = - MaybeUninit::new(self.get_unchecked((i, j)).clone()); + Status::init( + out.get_unchecked_mut((j, i)), + self.get_unchecked((i, j)).inlined_clone(), + ); } } } } + /// Transposes `self` and store the result into `out`. + #[inline] + pub fn transpose_to(&self, out: &mut Matrix) + where + T: Scalar, + R2: Dim, + C2: Dim, + SB: RawStorageMut, + ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, + { + self.transpose_to_uninit(Init, out) + } + /// Transposes `self`. #[inline] #[must_use = "Did you mean to use transpose_mut()?"] pub fn transpose(&self) -> OMatrix where - T: Clone, + T: Scalar, DefaultAllocator: Allocator, { - let (nrows, ncols) = self.data.shape(); - let mut res = OMatrix::new_uninitialized_generic(ncols, nrows); - self.transpose_to(&mut res); + let (nrows, ncols) = self.shape_generic(); - unsafe { - // Safety: res is now fully initialized due to the guarantees of transpose_to. - res.assume_init() - } - } -} - -impl OMatrix -where - DefaultAllocator: Allocator + Allocator, -{ - /// Transposes `self`. Does not require `T: Clone` like its other counterparts. - pub fn transpose_into(self) -> OMatrix { - let (nrows, ncols) = self.data.shape(); - let mut res = OMatrix::new_uninitialized_generic(ncols, nrows); - let mut md = self.manually_drop(); - - let (nrows, ncols) = res.shape(); - - // TODO: optimize that. - for i in 0..nrows { - for j in 0..ncols { - // Safety: the indices are within range, and since the indices - // don't repeat, we don't do any double-drops. - unsafe { - *res.get_unchecked_mut((j, i)) = - MaybeUninit::new(ManuallyDrop::take(md.get_unchecked_mut((i, j)))); - } - } - } - - unsafe { - // Safety: res is now fully initialized, since we've initialized - // every single entry. 
- res.assume_init() - } + let mut res = Matrix::uninit(ncols, nrows); + self.transpose_to_uninit(Uninit, &mut res); + // Safety: res is now fully initialized. + unsafe { res.assume_init() } } } /// # Elementwise mapping and folding -// Todo: maybe make ref versions of these methods that can be used when T is expensive to clone? -impl> Matrix { +impl> Matrix { /// Returns a matrix containing the result of `f` applied to each of its entries. #[inline] #[must_use] - pub fn map T2>(&self, mut f: F) -> OMatrix + pub fn map T2>(&self, mut f: F) -> OMatrix where - T: Clone, + T: Scalar, DefaultAllocator: Allocator, { - let (nrows, ncols) = self.data.shape(); - let mut res = OMatrix::new_uninitialized_generic(nrows, ncols); + let (nrows, ncols) = self.shape_generic(); + let mut res = Matrix::uninit(nrows, ncols); for j in 0..ncols.value() { for i in 0..nrows.value() { + // Safety: all indices are in range. unsafe { - let a = self.data.get_unchecked(i, j).clone(); + let a = self.data.get_unchecked(i, j).inlined_clone(); *res.data.get_unchecked_mut(i, j) = MaybeUninit::new(f(a)); } } } - // Safety: all entries have been initialized. + // Safety: res is now fully initialized. unsafe { res.assume_init() } } @@ -819,8 +789,9 @@ impl> Matrix { /// let q2 = q.cast::(); /// assert_eq!(q2, Vector3::new(1.0f32, 2.0, 3.0)); /// ``` - pub fn cast(self) -> OMatrix + pub fn cast(self) -> OMatrix where + T: Scalar, OMatrix: SupersetOf, DefaultAllocator: Allocator, { @@ -840,7 +811,10 @@ impl> Matrix { &self, init_f: impl FnOnce(Option<&T>) -> T2, f: impl FnMut(T2, &T) -> T2, - ) -> T2 { + ) -> T2 + where + T: Scalar, + { let mut it = self.iter(); let init = init_f(it.next()); it.fold(init, f) @@ -850,28 +824,28 @@ impl> Matrix { /// `f` also gets passed the row and column index, i.e. `f(row, col, value)`. #[inline] #[must_use] - pub fn map_with_location T2>( + pub fn map_with_location T2>( &self, mut f: F, ) -> OMatrix where - T: Clone, + T: Scalar, DefaultAllocator: Allocator, { - let (nrows, ncols) = self.data.shape(); - - let mut res = OMatrix::new_uninitialized_generic(nrows, ncols); + let (nrows, ncols) = self.shape_generic(); + let mut res = Matrix::uninit(nrows, ncols); for j in 0..ncols.value() { for i in 0..nrows.value() { + // Safety: all indices are in range. unsafe { - let a = self.data.get_unchecked(i, j).clone(); + let a = self.data.get_unchecked(i, j).inlined_clone(); *res.data.get_unchecked_mut(i, j) = MaybeUninit::new(f(i, j, a)); } } } - // Safety: all entries have been initialized. + // Safety: res is now fully initialized. unsafe { res.assume_init() } } @@ -879,20 +853,17 @@ impl> Matrix { /// `rhs`. #[inline] #[must_use] - pub fn zip_map( - &self, - rhs: &Matrix, - mut f: F, - ) -> OMatrix + pub fn zip_map(&self, rhs: &Matrix, mut f: F) -> OMatrix where - T: Clone, - S2: Storage, + T: Scalar, + T2: Scalar, + N3: Scalar, + S2: RawStorage, F: FnMut(T, T2) -> N3, DefaultAllocator: Allocator, { - let (nrows, ncols) = self.data.shape(); - - let mut res = OMatrix::new_uninitialized_generic(nrows, ncols); + let (nrows, ncols) = self.shape_generic(); + let mut res = Matrix::uninit(nrows, ncols); assert_eq!( (nrows.value(), ncols.value()), @@ -902,15 +873,16 @@ impl> Matrix { for j in 0..ncols.value() { for i in 0..nrows.value() { + // Safety: all indices are in range. 
unsafe { - let a = self.data.get_unchecked(i, j).clone(); - let b = rhs.data.get_unchecked(i, j).clone(); - *res.data.get_unchecked_mut(i, j) = MaybeUninit::new(f(a, b)); + let a = self.data.get_unchecked(i, j).inlined_clone(); + let b = rhs.data.get_unchecked(i, j).inlined_clone(); + *res.data.get_unchecked_mut(i, j) = MaybeUninit::new(f(a, b)) } } } - // Safety: all entries have been initialized. + // Safety: res is now fully initialized. unsafe { res.assume_init() } } @@ -918,22 +890,24 @@ impl> Matrix { /// `b`, and `c`. #[inline] #[must_use] - pub fn zip_zip_map( + pub fn zip_zip_map( &self, b: &Matrix, c: &Matrix, mut f: F, ) -> OMatrix where - T: Clone, - S2: Storage, - S3: Storage, + T: Scalar, + T2: Scalar, + N3: Scalar, + N4: Scalar, + S2: RawStorage, + S3: RawStorage, F: FnMut(T, T2, N3) -> N4, DefaultAllocator: Allocator, { - let (nrows, ncols) = self.data.shape(); - - let mut res = OMatrix::new_uninitialized_generic(nrows, ncols); + let (nrows, ncols) = self.shape_generic(); + let mut res = Matrix::uninit(nrows, ncols); assert_eq!( (nrows.value(), ncols.value()), @@ -948,55 +922,64 @@ impl> Matrix { for j in 0..ncols.value() { for i in 0..nrows.value() { + // Safety: all indices are in range. unsafe { - let a = self.data.get_unchecked(i, j).clone(); - let b = b.data.get_unchecked(i, j).clone(); - let c = c.data.get_unchecked(i, j).clone(); - *res.data.get_unchecked_mut(i, j) = MaybeUninit::new(f(a, b, c)); + let a = self.data.get_unchecked(i, j).inlined_clone(); + let b = b.data.get_unchecked(i, j).inlined_clone(); + let c = c.data.get_unchecked(i, j).inlined_clone(); + *res.data.get_unchecked_mut(i, j) = MaybeUninit::new(f(a, b, c)) } } } - // Safety: all entries have been initialized. + // Safety: res is now fully initialized. unsafe { res.assume_init() } } /// Folds a function `f` on each entry of `self`. #[inline] #[must_use] - pub fn fold(&self, mut init: Acc, mut f: impl FnMut(Acc, T) -> Acc) -> Acc + pub fn fold(&self, init: Acc, mut f: impl FnMut(Acc, T) -> Acc) -> Acc where - T: Clone, + T: Scalar, { - let (nrows, ncols) = self.data.shape(); + let (nrows, ncols) = self.shape_generic(); + + let mut res = init; for j in 0..ncols.value() { for i in 0..nrows.value() { + // Safety: all indices are in range. unsafe { - let a = self.data.get_unchecked(i, j).clone(); - init = f(init, a) + let a = self.data.get_unchecked(i, j).inlined_clone(); + res = f(res, a) } } } - init + res } /// Folds a function `f` on each pairs of entries from `self` and `rhs`. #[inline] #[must_use] - pub fn zip_fold( + pub fn zip_fold( &self, rhs: &Matrix, - mut init: Acc, + init: Acc, mut f: impl FnMut(Acc, T, T2) -> Acc, ) -> Acc where - T: Clone, - S2: Storage, + T: Scalar, + T2: Scalar, + R2: Dim, + C2: Dim, + S2: RawStorage, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { - let (nrows, ncols) = self.data.shape(); + let (nrows, ncols) = self.shape_generic(); + + let mut res = init; assert_eq!( (nrows.value(), ncols.value()), @@ -1007,22 +990,21 @@ impl> Matrix { for j in 0..ncols.value() { for i in 0..nrows.value() { unsafe { - let a = self.data.get_unchecked(i, j).clone(); - let b = rhs.data.get_unchecked(i, j).clone(); - init = f(init, a, b) + let a = self.data.get_unchecked(i, j).inlined_clone(); + let b = rhs.data.get_unchecked(i, j).inlined_clone(); + res = f(res, a, b) } } } - init + res } - /// Replaces each component of `self` by the result of a closure `f` applied on it. + /// Applies a closure `f` to modify each component of `self`. 
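Under the new signatures defined just below, the closures passed to `apply`, `zip_apply`, and `zip_zip_apply` receive `&mut T` as their first argument and mutate it in place instead of returning a fresh value. A short sketch (not part of the patch; values arbitrary):

```rust
use nalgebra::Matrix2;

fn main() {
    let mut m = Matrix2::new(1.0, 2.0,
                             3.0, 4.0);

    // The closure mutates each entry through `&mut T`.
    m.apply(|e| *e *= 2.0);

    // `zip_apply` mutates `self` in place using the entries of `rhs`.
    let rhs = Matrix2::new(10.0, 10.0,
                           10.0, 10.0);
    m.zip_apply(&rhs, |out, r| *out += r);

    assert_eq!(m, Matrix2::new(12.0, 14.0,
                               16.0, 18.0));
}
```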
#[inline] - pub fn apply T>(&mut self, mut f: F) + pub fn apply(&mut self, mut f: F) where - T: Clone, // This could be removed by changing the function signature. - S: StorageMut, + S: RawStorageMut, { let (nrows, ncols) = self.shape(); @@ -1030,7 +1012,7 @@ impl> Matrix { for i in 0..nrows { unsafe { let e = self.data.get_unchecked_mut(i, j); - *e = f(e.clone()) + f(e) } } } @@ -1039,14 +1021,16 @@ impl> Matrix { /// Replaces each component of `self` by the result of a closure `f` applied on its components /// joined with the components from `rhs`. #[inline] - pub fn zip_apply( + pub fn zip_apply( &mut self, rhs: &Matrix, - mut f: impl FnMut(T, T2) -> T, + mut f: impl FnMut(&mut T, T2), ) where - T: Clone, // This could be removed by changing the function signature. - S: StorageMut, - S2: Storage, + S: RawStorageMut, + T2: Scalar, + R2: Dim, + C2: Dim, + S2: RawStorage, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { let (nrows, ncols) = self.shape(); @@ -1061,8 +1045,8 @@ impl> Matrix { for i in 0..nrows { unsafe { let e = self.data.get_unchecked_mut(i, j); - let rhs = rhs.get_unchecked((i, j)).clone(); - *e = f(e.clone(), rhs) + let rhs = rhs.get_unchecked((i, j)).inlined_clone(); + f(e, rhs) } } } @@ -1071,16 +1055,21 @@ impl> Matrix { /// Replaces each component of `self` by the result of a closure `f` applied on its components /// joined with the components from `b` and `c`. #[inline] - pub fn zip_zip_apply( + pub fn zip_zip_apply( &mut self, b: &Matrix, c: &Matrix, - mut f: impl FnMut(T, T2, N3) -> T, + mut f: impl FnMut(&mut T, T2, N3), ) where - T: Clone, // This could be removed by changing the function signature. - S: StorageMut, - S2: Storage, - S3: Storage, + S: RawStorageMut, + T2: Scalar, + R2: Dim, + C2: Dim, + S2: RawStorage, + N3: Scalar, + R3: Dim, + C3: Dim, + S3: RawStorage, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { @@ -1101,9 +1090,9 @@ impl> Matrix { for i in 0..nrows { unsafe { let e = self.data.get_unchecked_mut(i, j); - let b = b.get_unchecked((i, j)).clone(); - let c = c.get_unchecked((i, j)).clone(); - *e = f(e.clone(), b, c) + let b = b.get_unchecked((i, j)).inlined_clone(); + let c = c.get_unchecked((i, j)).inlined_clone(); + f(e, b, c) } } } @@ -1111,7 +1100,7 @@ impl> Matrix { } /// # Iteration on components, rows, and columns -impl> Matrix { +impl> Matrix { /// Iterates through this matrix coordinates in column-major order. /// /// # Examples: @@ -1168,7 +1157,7 @@ impl> Matrix { #[inline] pub fn iter_mut(&mut self) -> MatrixIterMut<'_, T, R, C, S> where - S: StorageMut, + S: RawStorageMut, { MatrixIterMut::new(&mut self.data) } @@ -1191,7 +1180,7 @@ impl> Matrix { #[inline] pub fn row_iter_mut(&mut self) -> RowIterMut<'_, T, R, C, S> where - S: StorageMut, + S: RawStorageMut, { RowIterMut::new(self) } @@ -1214,13 +1203,13 @@ impl> Matrix { #[inline] pub fn column_iter_mut(&mut self) -> ColumnIterMut<'_, T, R, C, S> where - S: StorageMut, + S: RawStorageMut, { ColumnIterMut::new(self) } } -impl> Matrix { +impl> Matrix { /// Returns a mutable pointer to the start of the matrix. /// /// If the matrix is not empty, this pointer is guaranteed to be aligned @@ -1257,10 +1246,7 @@ impl> Matrix { /// /// The components of the slice are assumed to be ordered in column-major order. 
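// Usage sketch (illustrative, not part of the patch): with the signatures
// above, the closures given to `apply`, `zip_apply` and `zip_zip_apply`
// receive `&mut T` and write through it rather than returning a fresh value.
use nalgebra::Vector3;

fn apply_demo() {
    let mut v = Vector3::new(1.0, 2.0, 3.0);
    v.apply(|e| *e *= 2.0);
    assert_eq!(v, Vector3::new(2.0, 4.0, 6.0));

    let rhs = Vector3::new(0.5, 0.5, 0.5);
    v.zip_apply(&rhs, |e, r| *e += r);
    assert_eq!(v, Vector3::new(2.5, 4.5, 6.5));
}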
#[inline]
-    pub fn copy_from_slice(&mut self, slice: &[T])
-    where
-        T: Clone,
-    {
+    pub fn copy_from_slice(&mut self, slice: &[T]) {
         let (nrows, ncols) = self.shape();

         assert!(
@@ -1271,34 +1257,21 @@ impl<T, R: Dim, C: Dim, S: StorageMut<T, R, C>> Matrix<T, R, C, S> {
         for j in 0..ncols {
             for i in 0..nrows {
                 unsafe {
-                    *self.get_unchecked_mut((i, j)) = slice.get_unchecked(i + j * nrows).clone();
+                    *self.get_unchecked_mut((i, j)) =
+                        slice.get_unchecked(i + j * nrows).inlined_clone();
                 }
             }
         }
     }

-    /// Fills this matrix with the content of another one via clones. Both must have the same shape.
+    /// Fills this matrix with the content of another one. Both must have the same shape.
     #[inline]
-    pub fn copy_from<R2: Dim, C2: Dim, SB>(&mut self, other: &Matrix<T, R2, C2, SB>)
+    pub fn copy_from<R2, C2, SB>(&mut self, other: &Matrix<T, R2, C2, SB>)
     where
-        T: Clone,
-        SB: Storage<T, R2, C2>,
+        R2: Dim,
+        C2: Dim,
+        SB: RawStorage<T, R2, C2>,
         ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2>,
-    {
-        self.copy_from_fn(other, T::clone)
-    }
-
-    /// Fills this matrix with the content of another one, after applying a function to
-    /// the references of the entries of the other matrix. Both must have the same shape.
-    #[inline]
-    pub fn copy_from_fn<U, R2: Dim, C2: Dim, SB, F>(
-        &mut self,
-        other: &Matrix<U, R2, C2, SB>,
-        mut f: F,
-    ) where
-        SB: Storage<U, R2, C2>,
-        ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2>,
-        F: FnMut(&U) -> T,
     {
         assert!(
             self.shape() == other.shape(),
@@ -1308,71 +1281,20 @@ impl<T, R: Dim, C: Dim, S: StorageMut<T, R, C>> Matrix<T, R, C, S> {
         for j in 0..self.ncols() {
             for i in 0..self.nrows() {
                 unsafe {
-                    *self.get_unchecked_mut((i, j)) = f(other.get_unchecked((i, j)));
+                    *self.get_unchecked_mut((i, j)) = other.get_unchecked((i, j)).inlined_clone();
                 }
             }
         }
     }

-    /// Fills this matrix with the content of another one, after applying a function to
-    /// the entries of the other matrix. Both must have the same shape.
+    /// Fills this matrix with the content of the transpose of another one.
     #[inline]
-    pub fn move_from<R2: Dim, C2: Dim>(&mut self, other: OMatrix<T, R2, C2>)
+    pub fn tr_copy_from<R2, C2, SB>(&mut self, other: &Matrix<T, R2, C2, SB>)
     where
-        ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2>,
-        DefaultAllocator: Allocator<T, R2, C2>,
-    {
-        self.move_from_fn(other, |e| e)
-    }
-
-    /// Fills this matrix with the content of another one via moves. Both must have the same shape.
-    #[inline]
-    pub fn move_from_fn<U, R2: Dim, C2: Dim, F>(&mut self, other: OMatrix<U, R2, C2>, mut f: F)
-    where
-        ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2>,
-        DefaultAllocator: Allocator<U, R2, C2>,
-        F: FnMut(U) -> T,
-    {
-        assert!(
-            self.shape() == other.shape(),
-            "Unable to move from a matrix with a different shape."
-        );
-
-        let mut md = other.manually_drop();
-
-        for j in 0..self.ncols() {
-            for i in 0..self.nrows() {
-                unsafe {
-                    *self.get_unchecked_mut((i, j)) =
-                        f(ManuallyDrop::take(md.get_unchecked_mut((i, j))));
-                }
-            }
-        }
-    }
-
-    /// Fills this matrix with the content of the transpose another one via clones.
-    #[inline]
-    pub fn tr_copy_from<R2: Dim, C2: Dim, SB>(&mut self, other: &Matrix<T, R2, C2, SB>)
-    where
-        T: Clone,
-        SB: Storage<T, R2, C2>,
+        R2: Dim,
+        C2: Dim,
+        SB: RawStorage<T, R2, C2>,
         ShapeConstraint: DimEq<R, C2> + SameNumberOfColumns<C, R2>,
-    {
-        self.tr_copy_from_fn(other, T::clone)
-    }
-
-    /// Fills this matrix with the content of the transpose of another one, after applying
-    /// a function to the references of the entries of the other matrix. Both must have the
-    /// same shape.
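// Usage sketch (illustrative, not part of the patch): `copy_from` and
// `tr_copy_from` overwrite `self` with another matrix (resp. its transpose)
// and panic on shape mismatch.
use nalgebra::{Matrix2x3, Matrix3x2};

fn copy_demo() {
    let src = Matrix2x3::new(1, 2, 3, 4, 5, 6);
    let mut dst = Matrix2x3::zeros();
    dst.copy_from(&src);
    assert_eq!(dst, src);

    let mut tr = Matrix3x2::zeros();
    tr.tr_copy_from(&src);
    assert_eq!(tr, src.transpose());
}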
- #[inline] - pub fn tr_copy_from_fn( - &mut self, - other: &Matrix, - mut f: F, - ) where - SB: Storage, - ShapeConstraint: DimEq + SameNumberOfColumns, - F: FnMut(&U) -> T, { let (nrows, ncols) = self.shape(); assert!( @@ -1383,44 +1305,7 @@ impl> Matrix { for j in 0..ncols { for i in 0..nrows { unsafe { - *self.get_unchecked_mut((i, j)) = f(other.get_unchecked((j, i))); - } - } - } - } - - /// Fills this matrix with the content of the transpose another one via moves. - #[inline] - pub fn tr_move_from(&mut self, other: OMatrix) - where - DefaultAllocator: Allocator, - ShapeConstraint: DimEq + SameNumberOfColumns, - { - self.tr_move_from_fn(other, |e| e) - } - - /// Fills this matrix with the content of the transpose of another one, after applying - /// a function to the entries of the other matrix. Both must have the same shape. - #[inline] - pub fn tr_move_from_fn(&mut self, other: OMatrix, mut f: F) - where - ShapeConstraint: DimEq + SameNumberOfColumns, - DefaultAllocator: Allocator, - F: FnMut(U) -> T, - { - let (nrows, ncols) = self.shape(); - assert!( - (ncols, nrows) == other.shape(), - "Unable to move from a matrix with incompatible shape." - ); - - let mut md = other.manually_drop(); - - for j in 0..ncols { - for i in 0..nrows { - unsafe { - *self.get_unchecked_mut((i, j)) = - f(ManuallyDrop::take(md.get_unchecked_mut((j, i)))); + *self.get_unchecked_mut((i, j)) = other.get_unchecked((j, i)).inlined_clone(); } } } @@ -1429,62 +1314,13 @@ impl> Matrix { // TODO: rename `apply` to `apply_mut` and `apply_into` to `apply`? /// Returns `self` with each of its components replaced by the result of a closure `f` applied on it. #[inline] - pub fn apply_into T>(mut self, f: F) -> Self - where - T: Clone, - { + pub fn apply_into(mut self, f: F) -> Self { self.apply(f); self } } -impl, R, C>> Matrix, R, C, S> { - /// Initializes this matrix with the content of another one via clones. Both must have the same shape. - #[inline] - pub fn copy_init_from(&mut self, other: &Matrix) - where - T: Clone, - SB: Storage, - ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, - { - self.copy_from_fn(other, |e| MaybeUninit::new(e.clone())) - } - - /// Initializes this matrix with the content of another one, after applying a function to - /// the entries of the other matrix. Both must have the same shape. - #[inline] - pub fn move_init_from(&mut self, other: OMatrix) - where - SB: Storage, - DefaultAllocator: Allocator, - ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, - { - self.move_from_fn(other, MaybeUninit::new) - } - - /// Initializes this matrix with the content of the transpose another one via clones. - #[inline] - pub fn tr_copy_init_from(&mut self, other: &Matrix) - where - T: Clone, - SB: Storage, - ShapeConstraint: DimEq + SameNumberOfColumns, - { - self.tr_copy_from_fn(other, |e| MaybeUninit::new(e.clone())) - } - - /// Initializes this matrix with the content of the transpose another one via moves. - #[inline] - pub fn tr_move_init_from(&mut self, other: OMatrix) - where - DefaultAllocator: Allocator, - ShapeConstraint: DimEq + SameNumberOfColumns, - { - self.tr_move_from_fn(other, MaybeUninit::new) - } -} - -impl> Vector { +impl> Vector { /// Gets a reference to the i-th element of this column vector without bound checking. #[inline] #[must_use] @@ -1495,7 +1331,7 @@ impl> Vector { } } -impl> Vector { +impl> Vector { /// Gets a mutable reference to the i-th element of this column vector without bound checking. 
#[inline] #[must_use] @@ -1506,25 +1342,27 @@ impl> Vector { } } -impl> Matrix { +impl + IsContiguous> Matrix { /// Extracts a slice containing the entire matrix entries ordered column-by-columns. #[inline] #[must_use] pub fn as_slice(&self) -> &[T] { - self.data.as_slice() + // Safety: this is OK thanks to the IsContiguous trait. + unsafe { self.data.as_slice_unchecked() } } } -impl> Matrix { +impl + IsContiguous> Matrix { /// Extracts a mutable slice containing the entire matrix entries ordered column-by-columns. #[inline] #[must_use] pub fn as_mut_slice(&mut self) -> &mut [T] { - self.data.as_mut_slice() + // Safety: this is OK thanks to the IsContiguous trait. + unsafe { self.data.as_mut_slice_unchecked() } } } -impl> Matrix { +impl> Matrix { /// Transposes the square matrix `self` in-place. pub fn transpose_mut(&mut self) { assert!( @@ -1542,12 +1380,18 @@ impl> Matrix { } } -impl> Matrix { +impl> Matrix { /// Takes the adjoint (aka. conjugate-transpose) of `self` and store the result into `out`. #[inline] - pub fn adjoint_to(&self, out: &mut Matrix, R2, C2, SB>) - where - SB: StorageMut, R2, C2>, + fn adjoint_to_uninit( + &self, + status: Status, + out: &mut Matrix, + ) where + Status: InitStatus, + R2: Dim, + C2: Dim, + SB: RawStorageMut, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { let (nrows, ncols) = self.shape(); @@ -1559,14 +1403,29 @@ impl> Matrix(&self, out: &mut Matrix) + where + R2: Dim, + C2: Dim, + SB: RawStorageMut, + ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, + { + self.adjoint_to_uninit(Init, out) + } + /// The adjoint (aka. conjugate-transpose) of `self`. #[inline] #[must_use = "Did you mean to use adjoint_mut()?"] @@ -1574,21 +1433,23 @@ impl> Matrix, { - let (nrows, ncols) = self.data.shape(); - let mut res = OMatrix::new_uninitialized_generic(ncols, nrows); - self.adjoint_to(&mut res); + let (nrows, ncols) = self.shape_generic(); + let mut res = Matrix::uninit(ncols, nrows); + self.adjoint_to_uninit(Uninit, &mut res); + + // Safety: res is now fully initialized. unsafe { res.assume_init() } } /// Takes the conjugate and transposes `self` and store the result into `out`. #[deprecated(note = "Renamed `self.adjoint_to(out)`.")] #[inline] - pub fn conjugate_transpose_to( - &self, - out: &mut Matrix, R2, C2, SB>, - ) where - SB: StorageMut, R2, C2>, + pub fn conjugate_transpose_to(&self, out: &mut Matrix) + where + R2: Dim, + C2: Dim, + SB: RawStorageMut, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { self.adjoint_to(out) @@ -1635,27 +1496,27 @@ impl> Matrix> Matrix { +impl> Matrix { /// The conjugate of the complex matrix `self` computed in-place. #[inline] pub fn conjugate_mut(&mut self) { - self.apply(|e| e.simd_conjugate()) + self.apply(|e| *e = e.simd_conjugate()) } /// Divides each component of the complex matrix `self` by the given real. #[inline] pub fn unscale_mut(&mut self, real: T::SimdRealField) { - self.apply(|e| e.simd_unscale(real)) + self.apply(|e| *e = e.simd_unscale(real)) } /// Multiplies each component of the complex matrix `self` by the given real. #[inline] pub fn scale_mut(&mut self, real: T::SimdRealField) { - self.apply(|e| e.simd_scale(real)) + self.apply(|e| *e = e.simd_scale(real)) } } -impl> Matrix { +impl> Matrix { /// Sets `self` to its adjoint. #[deprecated(note = "Renamed to `self.adjoint_mut()`.")] pub fn conjugate_transform_mut(&mut self) { @@ -1691,13 +1552,12 @@ impl> Matrix { } } -impl> SquareMatrix { +impl> SquareMatrix { /// The diagonal of this matrix. 
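// Usage sketch (illustrative, not part of the patch): the `IsContiguous`
// marker bound gates `as_slice`; owned matrices are stored contiguously in
// column-major order, so it is always available on them.
use nalgebra::Matrix2;

fn as_slice_demo() {
    let m = Matrix2::new(1, 3,
                         2, 4);
    // Column-major layout: first column (1, 2), then second column (3, 4).
    assert_eq!(m.as_slice(), &[1, 2, 3, 4]);
}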
#[inline] #[must_use] pub fn diagonal(&self) -> OVector where - T: Clone, DefaultAllocator: Allocator, { self.map_diagonal(|e| e) @@ -1708,9 +1568,8 @@ impl> SquareMatrix { /// This is a more efficient version of `self.diagonal().map(f)` since this /// allocates only once. #[must_use] - pub fn map_diagonal(&self, mut f: impl FnMut(T) -> T2) -> OVector + pub fn map_diagonal(&self, mut f: impl FnMut(T) -> T2) -> OVector where - T: Clone, DefaultAllocator: Allocator, { assert!( @@ -1718,17 +1577,18 @@ impl> SquareMatrix { "Unable to get the diagonal of a non-square matrix." ); - let dim = self.data.shape().0; - let mut res = OVector::new_uninitialized_generic(dim, Const::<1>); + let dim = self.shape_generic().0; + let mut res = Matrix::uninit(dim, Const::<1>); for i in 0..dim.value() { + // Safety: all indices are in range. unsafe { *res.vget_unchecked_mut(i) = - MaybeUninit::new(f(self.get_unchecked((i, i)).clone())); + MaybeUninit::new(f(self.get_unchecked((i, i)).inlined_clone())); } } - // Safety: we have initialized all entries. + // Safety: res is now fully initialized. unsafe { res.assume_init() } } @@ -1744,7 +1604,7 @@ impl> SquareMatrix { "Cannot compute the trace of non-square matrix." ); - let dim = self.data.shape().0; + let dim = self.shape_generic().0; let mut res = T::zero(); for i in 0..dim.value() { @@ -1792,7 +1652,7 @@ impl> SquareMatrix { } } -impl + IsNotStaticOne, S: Storage> +impl + IsNotStaticOne, S: RawStorage> Matrix { /// Yields the homogeneous matrix for this matrix, i.e., appending an additional dimension and @@ -1809,13 +1669,13 @@ impl + IsNotStaticOne, S: Storage ); let dim = DimSum::::from_usize(self.nrows() + 1); let mut res = OMatrix::identity_generic(dim, dim); - res.generic_slice_mut::((0, 0), self.data.shape()) + res.generic_slice_mut::((0, 0), self.shape_generic()) .copy_from(self); res } } -impl, S: Storage> Vector { +impl, S: RawStorage> Vector { /// Computes the coordinates in projective space of this vector, i.e., appends a `0` to its /// coordinates. #[inline] @@ -1832,7 +1692,7 @@ impl, S: Storage> Vector { #[inline] pub fn from_homogeneous(v: Vector, SB>) -> Option> where - SB: Storage>, + SB: RawStorage>, DefaultAllocator: Allocator, { if v[v.len() - 1].is_zero() { @@ -1844,7 +1704,7 @@ impl, S: Storage> Vector { } } -impl, S: Storage> Vector { +impl, S: RawStorage> Vector { /// Constructs a new vector of higher dimension by appending `element` to the end of `self`. #[inline] #[must_use] @@ -1854,19 +1714,22 @@ impl, S: Storage> Vector { { let len = self.len(); let hnrows = DimSum::::from_usize(len + 1); - let mut res = OVector::new_uninitialized_generic(hnrows, Const::<1>); - res.generic_slice_mut((0, 0), self.data.shape()) - .copy_from_fn(self, |e| MaybeUninit::new(e.clone())); + let mut res = Matrix::uninit(hnrows, Const::<1>); + // This is basically a copy_from except that we warp the copied + // values into MaybeUninit. + res.generic_slice_mut((0, 0), self.shape_generic()) + .zip_apply(self, |out, e| *out = MaybeUninit::new(e)); res[(len, 0)] = MaybeUninit::new(element); + // Safety: res has been fully initialized. 
unsafe { res.assume_init() } } } impl AbsDiffEq for Matrix where - T: AbsDiffEq, - S: Storage, + T: Scalar + AbsDiffEq, + S: RawStorage, T::Epsilon: Copy, { type Epsilon = T::Epsilon; @@ -1886,7 +1749,7 @@ where impl RelativeEq for Matrix where - T: RelativeEq, + T: Scalar + RelativeEq, S: Storage, T::Epsilon: Copy, { @@ -1908,8 +1771,8 @@ where impl UlpsEq for Matrix where - T: UlpsEq, - S: Storage, + T: Scalar + UlpsEq, + S: RawStorage, T::Epsilon: Copy, { #[inline] @@ -1926,9 +1789,10 @@ where } } -impl PartialOrd for Matrix +impl PartialOrd for Matrix where - S: Storage, + T: Scalar + PartialOrd, + S: RawStorage, { #[inline] fn partial_cmp(&self, other: &Self) -> Option { @@ -2017,13 +1881,22 @@ where } } -impl Eq for Matrix where S: Storage {} - -impl PartialEq> - for Matrix +impl Eq for Matrix where - S: Storage, - S2: Storage, + T: Scalar + Eq, + S: RawStorage, +{ +} + +impl PartialEq> for Matrix +where + T: Scalar + PartialEq, + C: Dim, + C2: Dim, + R: Dim, + R2: Dim, + S: RawStorage, + S2: RawStorage, { #[inline] fn eq(&self, right: &Matrix) -> bool { @@ -2036,7 +1909,7 @@ macro_rules! impl_fmt { impl $trait for Matrix where T: Scalar + $trait, - S: Storage, + S: RawStorage, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { #[cfg(feature = "std")] @@ -2140,7 +2013,7 @@ mod tests { } /// # Cross product -impl> +impl> Matrix { /// The perpendicular product between two 2D column vectors, i.e. `a.x * b.y - a.y * b.x`. @@ -2150,7 +2023,7 @@ impl, + SB: RawStorage, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns + SameNumberOfRows @@ -2176,12 +2049,11 @@ impl( - &self, - b: &Matrix, - ) -> MatrixCross + pub fn cross(&self, b: &Matrix) -> MatrixCross where - SB: Storage, + R2: Dim, + C2: Dim, + SB: RawStorage, DefaultAllocator: SameShapeAllocator, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns, { @@ -2198,7 +2070,7 @@ impl::from_usize(3); let ncols = SameShapeC::::from_usize(1); - let mut res = Matrix::new_uninitialized_generic(nrows, ncols); + let mut res = Matrix::uninit(nrows, ncols); let ax = self.get_unchecked((0, 0)); let ay = self.get_unchecked((1, 0)); @@ -2221,6 +2093,7 @@ impl::from_usize(1); let ncols = SameShapeC::::from_usize(3); - let mut res = Matrix::new_uninitialized_generic(nrows, ncols); + let mut res = Matrix::uninit(nrows, ncols); let ax = self.get_unchecked((0, 0)); let ay = self.get_unchecked((0, 1)); @@ -2251,13 +2124,14 @@ impl> Vector { +impl> Vector { /// Computes the matrix `M` such that for all vector `v` we have `M * v == self.cross(&v)`. #[inline] #[must_use] @@ -2299,9 +2173,10 @@ impl> Matrix AbsDiffEq for Unit> +impl AbsDiffEq for Unit> where - S: Storage, + T: Scalar + AbsDiffEq, + S: RawStorage, T::Epsilon: Copy, { type Epsilon = T::Epsilon; @@ -2317,8 +2192,9 @@ where } } -impl RelativeEq for Unit> +impl RelativeEq for Unit> where + T: Scalar + RelativeEq, S: Storage, T::Epsilon: Copy, { @@ -2339,9 +2215,10 @@ where } } -impl UlpsEq for Unit> +impl UlpsEq for Unit> where - S: Storage, + T: Scalar + UlpsEq, + S: RawStorage, T::Epsilon: Copy, { #[inline] @@ -2355,9 +2232,12 @@ where } } -impl Hash for Matrix +impl Hash for Matrix where - S: Storage, + T: Scalar + Hash, + R: Dim, + C: Dim, + S: RawStorage, { fn hash(&self, state: &mut H) { let (nrows, ncols) = self.shape(); diff --git a/src/base/matrix_simba.rs b/src/base/matrix_simba.rs index f3f2d13b..5c259207 100644 --- a/src/base/matrix_simba.rs +++ b/src/base/matrix_simba.rs @@ -9,9 +9,11 @@ use crate::base::{DefaultAllocator, OMatrix, Scalar}; * Simd structures. 
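// Usage sketch (illustrative, not part of the patch): the 2D `perp` and 3D
// `cross` products defined above.
use nalgebra::{Vector2, Vector3};

fn cross_demo() {
    let a = Vector2::new(1.0, 0.0);
    let b = Vector2::new(0.0, 1.0);
    assert_eq!(a.perp(&b), 1.0); // a.x * b.y - a.y * b.x

    let x = Vector3::new(1.0, 0.0, 0.0);
    let y = Vector3::new(0.0, 1.0, 0.0);
    assert_eq!(x.cross(&y), Vector3::new(0.0, 0.0, 1.0));
}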
* */ -impl SimdValue for OMatrix +impl SimdValue for OMatrix where T: Scalar + SimdValue, + R: Dim, + C: Dim, T::Element: Scalar, DefaultAllocator: Allocator + Allocator, { @@ -42,7 +44,6 @@ where fn replace(&mut self, i: usize, val: Self::Element) { self.zip_apply(&val, |mut a, b| { a.replace(i, b); - a }) } @@ -50,7 +51,6 @@ where unsafe fn replace_unchecked(&mut self, i: usize, val: Self::Element) { self.zip_apply(&val, |mut a, b| { a.replace_unchecked(i, b); - a }) } diff --git a/src/base/matrix_slice.rs b/src/base/matrix_slice.rs index 0d65a4fa..261d41e2 100644 --- a/src/base/matrix_slice.rs +++ b/src/base/matrix_slice.rs @@ -1,14 +1,13 @@ use std::marker::PhantomData; -use std::mem::MaybeUninit; use std::ops::{Range, RangeFrom, RangeFull, RangeInclusive, RangeTo}; use std::slice; -use crate::base::allocator::{Allocator, InnerAllocator}; +use crate::base::allocator::Allocator; use crate::base::default_allocator::DefaultAllocator; use crate::base::dimension::{Const, Dim, DimName, Dynamic, IsNotStaticOne, U1}; use crate::base::iter::MatrixIter; -use crate::base::storage::{ContiguousStorage, ContiguousStorageMut, Storage, StorageMut}; -use crate::base::{Matrix, Owned}; +use crate::base::storage::{IsContiguous, Owned, RawStorage, RawStorageMut, Storage}; +use crate::base::{Matrix, Scalar}; macro_rules! slice_storage_impl( ($doc: expr; $Storage: ident as $SRef: ty; $T: ident.$get_addr: ident ($Ptr: ty as $Ref: ty)) => { @@ -82,7 +81,7 @@ macro_rules! slice_storage_impl( impl <'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> $T<'a, T, R, C, RStride, CStride> where - Self: ContiguousStorage + Self: RawStorage + IsContiguous { /// Extracts the original slice from this storage pub fn into_slice(self) -> &'a [T] { @@ -100,19 +99,19 @@ macro_rules! slice_storage_impl( slice_storage_impl!("A matrix data storage for a matrix slice. Only contains an internal reference \ to another matrix data storage."; - Storage as &'a S; SliceStorage.get_address_unchecked(*const T as &'a T)); + RawStorage as &'a S; SliceStorage.get_address_unchecked(*const T as &'a T)); slice_storage_impl!("A mutable matrix data storage for mutable matrix slice. Only contains an \ internal mutable reference to another matrix data storage."; - StorageMut as &'a mut S; SliceStorageMut.get_address_unchecked_mut(*mut T as &'a mut T) + RawStorageMut as &'a mut S; SliceStorageMut.get_address_unchecked_mut(*mut T as &'a mut T) ); -impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Copy +impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Copy for SliceStorage<'a, T, R, C, RStride, CStride> { } -impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Clone +impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Clone for SliceStorage<'a, T, R, C, RStride, CStride> { #[inline] @@ -126,10 +125,10 @@ impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Clone } } -impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> +impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> SliceStorageMut<'a, T, R, C, RStride, CStride> where - Self: ContiguousStorageMut, + Self: RawStorageMut + IsContiguous, { /// Extracts the original slice from this storage pub fn into_slice_mut(self) -> &'a mut [T] { @@ -145,7 +144,7 @@ where macro_rules! 
storage_impl( ($($T: ident),* $(,)*) => {$( - unsafe impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Storage + unsafe impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> RawStorage for $T<'a, T, R, C, RStride, CStride> { type RStride = RStride; @@ -182,26 +181,6 @@ macro_rules! storage_impl( } } - #[inline] - fn into_owned(self) -> Owned - where - T: Clone, - DefaultAllocator: Allocator - { - self.clone_owned() - } - - #[inline] - fn clone_owned(&self) -> Owned - where - T: Clone, - DefaultAllocator: Allocator - { - let (nrows, ncols) = self.shape(); - let it = MatrixIter::new(self).cloned(); - Owned( DefaultAllocator::allocate_from_iterator(nrows, ncols, it)) - } - #[inline] unsafe fn as_slice_unchecked(&self) -> &[T] { let (nrows, ncols) = self.shape(); @@ -214,39 +193,29 @@ macro_rules! storage_impl( } } } + + unsafe impl<'a, T: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Storage + for $T<'a, T, R, C, RStride, CStride> { + #[inline] + fn into_owned(self) -> Owned + where DefaultAllocator: Allocator { + self.clone_owned() + } + + #[inline] + fn clone_owned(&self) -> Owned + where DefaultAllocator: Allocator { + let (nrows, ncols) = self.shape(); + let it = MatrixIter::new(self).cloned(); + DefaultAllocator::allocate_from_iterator(nrows, ncols, it) + } + } )*} ); storage_impl!(SliceStorage, SliceStorageMut); -impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> - SliceStorage<'a, MaybeUninit, R, C, RStride, CStride> -{ - /// Assumes a slice storage's entries to be initialized. This operation - /// should be near zero-cost. - /// - /// # Safety - /// All of the slice storage's entries must be initialized, otherwise - /// Undefined Behavior will be triggered. - pub unsafe fn assume_init(self) -> SliceStorage<'a, T, R, C, RStride, CStride> { - SliceStorage::from_raw_parts(self.ptr as *const T, self.shape, self.strides) - } -} - -impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> - SliceStorageMut<'a, MaybeUninit, R, C, RStride, CStride> -{ - /// Assumes a slice storage's entries to be initialized. This operation should be near zero-cost. - /// - /// # Safety - /// The user must make sure that every single entry of the buffer has been initialized, - /// or Undefined Behavior will immediately occur. 
- pub unsafe fn assume_init(self) -> SliceStorageMut<'a, T, R, C, RStride, CStride> { - SliceStorageMut::from_raw_parts(self.ptr as *mut T, self.shape, self.strides) - } -} - -unsafe impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> StorageMut +unsafe impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> RawStorageMut for SliceStorageMut<'a, T, R, C, RStride, CStride> { #[inline] @@ -266,37 +235,22 @@ unsafe impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> StorageMut ContiguousStorage - for SliceStorage<'a, T, R, U1, U1, CStride> -{ -} - -unsafe impl<'a, T, R: Dim, CStride: Dim> ContiguousStorage +unsafe impl<'a, T, R: Dim, CStride: Dim> IsContiguous for SliceStorage<'a, T, R, U1, U1, CStride> {} +unsafe impl<'a, T, R: Dim, CStride: Dim> IsContiguous for SliceStorageMut<'a, T, R, U1, U1, CStride> { } -unsafe impl<'a, T, R: Dim, CStride: Dim> ContiguousStorageMut - for SliceStorageMut<'a, T, R, U1, U1, CStride> -{ -} - -unsafe impl<'a, T, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorage +unsafe impl<'a, T, R: DimName, C: Dim + IsNotStaticOne> IsContiguous for SliceStorage<'a, T, R, C, U1, R> { } - -unsafe impl<'a, T, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorage +unsafe impl<'a, T, R: DimName, C: Dim + IsNotStaticOne> IsContiguous for SliceStorageMut<'a, T, R, C, U1, R> { } -unsafe impl<'a, T, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorageMut - for SliceStorageMut<'a, T, R, C, U1, R> -{ -} - -impl> Matrix { +impl> Matrix { #[inline] fn assert_slice_index( &self, @@ -344,7 +298,6 @@ macro_rules! matrix_slice_impl( $fixed_slice_with_steps: ident, $generic_slice: ident, $generic_slice_with_steps: ident, - $full_slice: ident, $rows_range_pair: ident, $columns_range_pair: ident) => { /* @@ -403,14 +356,14 @@ macro_rules! matrix_slice_impl( pub fn $rows_generic($me: $Me, row_start: usize, nrows: RSlice) -> $MatrixSlice<'_, T, RSlice, C, S::RStride, S::CStride> { - let my_shape = $me.data.shape(); + let my_shape = $me.shape_generic(); $me.assert_slice_index((row_start, 0), (nrows.value(), my_shape.1.value()), (0, 0)); let shape = (nrows, my_shape.1); unsafe { let data = $SliceStorage::new_unchecked($data, (row_start, 0), shape); - Matrix::from_data(data) + Matrix::from_data_statically_unchecked(data) } } @@ -421,16 +374,16 @@ macro_rules! matrix_slice_impl( -> $MatrixSlice<'_, T, RSlice, C, Dynamic, S::CStride> where RSlice: Dim { - let my_shape = $me.data.shape(); + let my_shape = $me.shape_generic(); let my_strides = $me.data.strides(); $me.assert_slice_index((row_start, 0), (nrows.value(), my_shape.1.value()), (step, 0)); let strides = (Dynamic::new((step + 1) * my_strides.0.value()), my_strides.1); - let shape = (nrows, my_shape.1); + let shape = (nrows, my_shape.1); unsafe { let data = $SliceStorage::new_with_strides_unchecked($data, (row_start, 0), shape, strides); - Matrix::from_data(data) + Matrix::from_data_statically_unchecked(data) } } @@ -491,33 +444,34 @@ macro_rules! matrix_slice_impl( pub fn $columns_generic($me: $Me, first_col: usize, ncols: CSlice) -> $MatrixSlice<'_, T, R, CSlice, S::RStride, S::CStride> { - let my_shape = $me.data.shape(); + let my_shape = $me.shape_generic(); $me.assert_slice_index((0, first_col), (my_shape.0.value(), ncols.value()), (0, 0)); let shape = (my_shape.0, ncols); unsafe { let data = $SliceStorage::new_unchecked($data, (0, first_col), shape); - Matrix::from_data(data) + Matrix::from_data_statically_unchecked(data) } } + /// Extracts from this matrix `ncols` columns skipping `step` columns. 
Both argument may /// or may not be values known at compile-time. #[inline] pub fn $columns_generic_with_step($me: $Me, first_col: usize, ncols: CSlice, step: usize) -> $MatrixSlice<'_, T, R, CSlice, S::RStride, Dynamic> { - let my_shape = $me.data.shape(); + let my_shape = $me.shape_generic(); let my_strides = $me.data.strides(); $me.assert_slice_index((0, first_col), (my_shape.0.value(), ncols.value()), (0, step)); let strides = (my_strides.0, Dynamic::new((step + 1) * my_strides.1.value())); - let shape = (my_shape.0, ncols); + let shape = (my_shape.0, ncols); unsafe { let data = $SliceStorage::new_with_strides_unchecked($data, (0, first_col), shape, strides); - Matrix::from_data(data) + Matrix::from_data_statically_unchecked(data) } } @@ -537,10 +491,11 @@ macro_rules! matrix_slice_impl( unsafe { let data = $SliceStorage::new_unchecked($data, start, shape); - Matrix::from_data(data) + Matrix::from_data_statically_unchecked(data) } } + /// Slices this matrix starting at its component `(start.0, start.1)` and with /// `(shape.0, shape.1)` components. Each row (resp. column) of the sliced matrix is /// separated by `steps.0` (resp. `steps.1`) ignored rows (resp. columns) of the @@ -564,7 +519,7 @@ macro_rules! matrix_slice_impl( unsafe { let data = $SliceStorage::new_unchecked($data, (irow, icol), shape); - Matrix::from_data(data) + Matrix::from_data_statically_unchecked(data) } } @@ -581,14 +536,16 @@ macro_rules! matrix_slice_impl( /// Creates a slice that may or may not have a fixed size and stride. #[inline] - pub fn $generic_slice($me: $Me, start: (usize, usize), shape: (RSlice, CSlice)) - -> $MatrixSlice - { + pub fn $generic_slice($me: $Me, start: (usize, usize), shape: (RSlice, CSlice)) + -> $MatrixSlice<'_, T, RSlice, CSlice, S::RStride, S::CStride> + where RSlice: Dim, + CSlice: Dim { + $me.assert_slice_index(start, (shape.0.value(), shape.1.value()), (0, 0)); unsafe { let data = $SliceStorage::new_unchecked($data, start, shape); - Matrix::from_data(data) + Matrix::from_data_statically_unchecked(data) } } @@ -610,16 +567,10 @@ macro_rules! matrix_slice_impl( unsafe { let data = $SliceStorage::new_with_strides_unchecked($data, start, shape, strides); - Matrix::from_data(data) + Matrix::from_data_statically_unchecked(data) } } - /// Returns a slice containing the entire matrix. - pub fn $full_slice($me: $Me) -> $MatrixSlice { - let (nrows, ncols) = $me.shape(); - $me.$generic_slice((0, 0), (R::from_usize(nrows), C::from_usize(ncols))) - } - /* * * Splitting. @@ -633,7 +584,7 @@ macro_rules! matrix_slice_impl( -> ($MatrixSlice<'_, T, Range1::Size, C, S::RStride, S::CStride>, $MatrixSlice<'_, T, Range2::Size, C, S::RStride, S::CStride>) { - let (nrows, ncols) = $me.data.shape(); + let (nrows, ncols) = $me.shape_generic(); let strides = $me.data.strides(); let start1 = r1.begin(nrows); @@ -654,8 +605,8 @@ macro_rules! matrix_slice_impl( let data1 = $SliceStorage::from_raw_parts(ptr1, (nrows1, ncols), strides); let data2 = $SliceStorage::from_raw_parts(ptr2, (nrows2, ncols), strides); - let slice1 = Matrix::from_data(data1); - let slice2 = Matrix::from_data(data2); + let slice1 = Matrix::from_data_statically_unchecked(data1); + let slice2 = Matrix::from_data_statically_unchecked(data2); (slice1, slice2) } @@ -669,7 +620,7 @@ macro_rules! 
matrix_slice_impl( -> ($MatrixSlice<'_, T, R, Range1::Size, S::RStride, S::CStride>, $MatrixSlice<'_, T, R, Range2::Size, S::RStride, S::CStride>) { - let (nrows, ncols) = $me.data.shape(); + let (nrows, ncols) = $me.shape_generic(); let strides = $me.data.strides(); let start1 = r1.begin(ncols); @@ -690,8 +641,8 @@ macro_rules! matrix_slice_impl( let data1 = $SliceStorage::from_raw_parts(ptr1, (nrows, ncols1), strides); let data2 = $SliceStorage::from_raw_parts(ptr2, (nrows, ncols2), strides); - let slice1 = Matrix::from_data(data1); - let slice2 = Matrix::from_data(data2); + let slice1 = Matrix::from_data_statically_unchecked(data1); + let slice2 = Matrix::from_data_statically_unchecked(data2); (slice1, slice2) } @@ -707,9 +658,9 @@ pub type MatrixSliceMut<'a, T, R, C, RStride = U1, CStride = R> = Matrix>; /// # Slicing based on index and length -impl> Matrix { +impl> Matrix { matrix_slice_impl!( - self: &Self, MatrixSlice, SliceStorage, Storage.get_address_unchecked(), &self.data; + self: &Self, MatrixSlice, SliceStorage, RawStorage.get_address_unchecked(), &self.data; row, row_part, rows, @@ -732,15 +683,14 @@ impl> Matrix { fixed_slice_with_steps, generic_slice, generic_slice_with_steps, - full_slice, rows_range_pair, columns_range_pair); } /// # Mutable slicing based on index and length -impl> Matrix { +impl> Matrix { matrix_slice_impl!( - self: &mut Self, MatrixSliceMut, SliceStorageMut, StorageMut.get_address_unchecked_mut(), &mut self.data; + self: &mut Self, MatrixSliceMut, SliceStorageMut, RawStorageMut.get_address_unchecked_mut(), &mut self.data; row_mut, row_part_mut, rows_mut, @@ -763,29 +713,10 @@ impl> Matrix { fixed_slice_with_steps_mut, generic_slice_mut, generic_slice_with_steps_mut, - full_slice_mut, rows_range_pair_mut, columns_range_pair_mut); } -impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> - MatrixSlice<'a, MaybeUninit, R, C, RStride, CStride> -{ - /// Assumes a matrix slices's entries to be initialized. This operation should be near zero-cost. - pub unsafe fn slice_assume_init(self) -> MatrixSlice<'a, T, R, C, RStride, CStride> { - Matrix::from_data(self.data.assume_init()) - } -} - -impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> - MatrixSliceMut<'a, MaybeUninit, R, C, RStride, CStride> -{ - /// Assumes a matrix slices's entries to be initialized. This operation should be near zero-cost. - pub unsafe fn slice_assume_init(self) -> MatrixSliceMut<'a, T, R, C, RStride, CStride> { - Matrix::from_data(self.data.assume_init()) - } -} - /// A range with a size that may be known at compile-time. /// /// This may be: @@ -922,7 +853,7 @@ impl SliceRange for RangeInclusive { // TODO: see how much of this overlaps with the general indexing // methods from indexing.rs. -impl> Matrix { +impl> Matrix { /// Slices a sub-matrix containing the rows indexed by the range `rows` and the columns indexed /// by the range `cols`. #[inline] @@ -936,7 +867,7 @@ impl> Matrix { RowRange: SliceRange, ColRange: SliceRange, { - let (nrows, ncols) = self.data.shape(); + let (nrows, ncols) = self.shape_generic(); self.generic_slice( (rows.begin(nrows), cols.begin(ncols)), (rows.size(nrows), cols.size(ncols)), @@ -966,7 +897,7 @@ impl> Matrix { // TODO: see how much of this overlaps with the general indexing // methods from indexing.rs. -impl> Matrix { +impl> Matrix { /// Slices a mutable sub-matrix containing the rows indexed by the range `rows` and the columns /// indexed by the range `cols`. 
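// Usage sketch (illustrative, not part of the patch): range-based slicing as
// generated by the macro above.
use nalgebra::Matrix3;

fn slice_demo() {
    let m = Matrix3::new(1, 2, 3,
                         4, 5, 6,
                         7, 8, 9);
    let s = m.slice_range(1.., ..2); // rows 1..3, columns 0..2
    assert_eq!((s.nrows(), s.ncols()), (2, 2));
    assert_eq!(s[(0, 0)], 4);
    assert_eq!(s[(1, 1)], 8);
}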
pub fn slice_range_mut( @@ -978,7 +909,7 @@ impl> Matrix { RowRange: SliceRange, ColRange: SliceRange, { - let (nrows, ncols) = self.data.shape(); + let (nrows, ncols) = self.shape_generic(); self.generic_slice_mut( (rows.begin(nrows), cols.begin(ncols)), (rows.size(nrows), cols.size(ncols)), @@ -1004,9 +935,13 @@ impl> Matrix { } } -impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> - From> +impl<'a, T, R, C, RStride, CStride> From> for MatrixSlice<'a, T, R, C, RStride, CStride> +where + R: Dim, + C: Dim, + RStride: Dim, + CStride: Dim, { fn from(slice_mut: MatrixSliceMut<'a, T, R, C, RStride, CStride>) -> Self { let data = SliceStorage { @@ -1016,6 +951,6 @@ impl<'a, T, R: Dim, C: Dim, RStride: Dim, CStride: Dim> _phantoms: PhantomData, }; - Matrix::from_data(data) + unsafe { Matrix::from_data_statically_unchecked(data) } } } diff --git a/src/base/min_max.rs b/src/base/min_max.rs index 83e62d10..3d390194 100644 --- a/src/base/min_max.rs +++ b/src/base/min_max.rs @@ -1,10 +1,10 @@ -use crate::storage::Storage; +use crate::storage::RawStorage; use crate::{ComplexField, Dim, Matrix, Scalar, SimdComplexField, SimdPartialOrd, Vector}; use num::{Signed, Zero}; use simba::simd::SimdSigned; /// # Find the min and max components -impl> Matrix { +impl> Matrix { /// Returns the absolute value of the component with the largest absolute value. /// # Example /// ``` @@ -167,7 +167,7 @@ impl> Matrix { } } -impl> Matrix { +impl> Matrix { /// Computes the index of the matrix component with the largest absolute value. /// /// # Examples: @@ -203,7 +203,7 @@ impl> Matri // TODO: find a way to avoid code duplication just for complex number support. /// # Find the min and max components (vector-specific methods) -impl> Vector { +impl> Vector { /// Computes the index of the vector component with the largest complex or real absolute value. /// /// # Examples: diff --git a/src/base/mod.rs b/src/base/mod.rs index fdfbb5c7..88b79dc3 100644 --- a/src/base/mod.rs +++ b/src/base/mod.rs @@ -33,10 +33,12 @@ mod unit; #[cfg(any(feature = "std", feature = "alloc"))] mod vec_storage; +mod blas_uninit; #[doc(hidden)] pub mod helper; mod interpolation; mod min_max; +pub mod uninit; pub use self::matrix::*; pub use self::norm::*; @@ -50,5 +52,6 @@ pub use self::alias::*; pub use self::alias_slice::*; pub use self::array_storage::*; pub use self::matrix_slice::*; +pub use self::storage::*; #[cfg(any(feature = "std", feature = "alloc"))] pub use self::vec_storage::*; diff --git a/src/base/norm.rs b/src/base/norm.rs index a8548ddd..c138069d 100644 --- a/src/base/norm.rs +++ b/src/base/norm.rs @@ -434,7 +434,7 @@ impl> Matrix { { let n = self.norm(); let le = n.simd_le(min_norm); - self.apply(|e| e.simd_unscale(n).select(le, e)); + self.apply(|e| *e = e.simd_unscale(n).select(le, *e)); SimdOption::new(n, le) } @@ -508,13 +508,8 @@ where /// The i-the canonical basis element. 
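// Usage sketch (illustrative, not part of the patch): the min/max queries
// above only need `RawStorage` to read the entries.
use nalgebra::Vector3;

fn min_max_demo() {
    let v = Vector3::new(-7.0, 2.0, 5.0);
    assert_eq!(v.amax(), 7.0); // largest absolute value
    assert_eq!(v.imax(), 2); // index of the largest component
}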
#[inline] fn canonical_basis_element(i: usize) -> Self { - assert!(i < D::dim(), "Index out of bound."); - let mut res = Self::zero(); - unsafe { - *res.data.get_unchecked_linear_mut(i) = T::one(); - } - + res[i] = T::one(); res } diff --git a/src/base/ops.rs b/src/base/ops.rs index 45a84b35..bbeb6d07 100644 --- a/src/base/ops.rs +++ b/src/base/ops.rs @@ -1,29 +1,31 @@ use num::{One, Zero}; use std::iter; -use std::mem::MaybeUninit; use std::ops::{ Add, AddAssign, Div, DivAssign, Index, IndexMut, Mul, MulAssign, Neg, Sub, SubAssign, }; use simba::scalar::{ClosedAdd, ClosedDiv, ClosedMul, ClosedNeg, ClosedSub}; -use crate::base::allocator::{ - Allocator, InnerAllocator, SameShapeAllocator, SameShapeC, SameShapeR, -}; +use crate::base::allocator::{Allocator, SameShapeAllocator, SameShapeC, SameShapeR}; +use crate::base::blas_uninit::gemm_uninit; use crate::base::constraint::{ AreMultipliable, DimEq, SameNumberOfColumns, SameNumberOfRows, ShapeConstraint, }; use crate::base::dimension::{Dim, DimMul, DimName, DimProd, Dynamic}; -use crate::base::storage::{ContiguousStorageMut, Storage, StorageMut}; +use crate::base::storage::{Storage, StorageMut}; +use crate::base::uninit::Uninit; use crate::base::{DefaultAllocator, Matrix, MatrixSum, OMatrix, Scalar, VectorSlice}; -use crate::{MatrixSliceMut, SimdComplexField}; +use crate::storage::IsContiguous; +use crate::uninit::{Init, InitStatus}; +use crate::{RawStorage, RawStorageMut, SimdComplexField}; +use std::mem::MaybeUninit; /* * * Indexing. * */ -impl> Index for Matrix { +impl> Index for Matrix { type Output = T; #[inline] @@ -33,10 +35,7 @@ impl> Index for Matrix } } -impl Index<(usize, usize)> for Matrix -where - S: Storage, -{ +impl> Index<(usize, usize)> for Matrix { type Output = T; #[inline] @@ -52,7 +51,7 @@ where } // Mutable versions. -impl> IndexMut for Matrix { +impl> IndexMut for Matrix { #[inline] fn index_mut(&mut self, i: usize) -> &mut T { let ij = self.vector_to_matrix_index(i); @@ -60,10 +59,7 @@ impl> IndexMut for Matrix IndexMut<(usize, usize)> for Matrix -where - S: StorageMut, -{ +impl> IndexMut<(usize, usize)> for Matrix { #[inline] fn index_mut(&mut self, ij: (usize, usize)) -> &mut T { let shape = self.shape(); @@ -135,25 +131,27 @@ macro_rules! componentwise_binop_impl( ($Trait: ident, $method: ident, $bound: ident; $TraitAssign: ident, $method_assign: ident, $method_assign_statically_unchecked: ident, $method_assign_statically_unchecked_rhs: ident; - $method_to: ident, $method_to_statically_unchecked: ident) => { + $method_to: ident, $method_to_statically_unchecked_uninit: ident) => { + impl> Matrix - where - T: Scalar + $bound - { + where T: Scalar + $bound { + /* * * Methods without dimension checking at compile-time. - * This is useful for code reuse because the sum representative system does not play - * nicely with static checks. + * This is useful for code reuse because the sum representative system does not plays + * easily with static checks. * */ #[inline] - fn $method_to_statically_unchecked( - &self, rhs: &Matrix, out: &mut Matrix, R3, C3, SC> - ) where - SB: Storage, - SC: StorageMut, R3, C3> - { + fn $method_to_statically_unchecked_uninit(&self, + status: Status, + rhs: &Matrix, + out: &mut Matrix) + where Status: InitStatus, + SB: RawStorage, + SC: RawStorageMut { assert_eq!(self.shape(), rhs.shape(), "Matrix addition/subtraction dimensions mismatch."); assert_eq!(self.shape(), out.shape(), "Matrix addition/subtraction output dimensions mismatch."); @@ -163,31 +161,28 @@ macro_rules! 
componentwise_binop_impl( if self.data.is_contiguous() && rhs.data.is_contiguous() && out.data.is_contiguous() { let arr1 = self.data.as_slice_unchecked(); let arr2 = rhs.data.as_slice_unchecked(); - let out = out.data.as_mut_slice_unchecked(); - for i in 0..arr1.len() { - *out.get_unchecked_mut(i) = MaybeUninit::new( - arr1.get_unchecked(i).inlined_clone().$method(arr2.get_unchecked(i).inlined_clone() - )); + let out = out.data.as_mut_slice_unchecked(); + for i in 0 .. arr1.len() { + Status::init(out.get_unchecked_mut(i), arr1.get_unchecked(i).inlined_clone().$method(arr2.get_unchecked(i).inlined_clone())); } } else { - for j in 0..self.ncols() { - for i in 0..self.nrows() { - *out.get_unchecked_mut((i, j)) = MaybeUninit::new( - self.get_unchecked((i, j)).inlined_clone().$method(rhs.get_unchecked((i, j)).inlined_clone()) - ); + for j in 0 .. self.ncols() { + for i in 0 .. self.nrows() { + let val = self.get_unchecked((i, j)).inlined_clone().$method(rhs.get_unchecked((i, j)).inlined_clone()); + Status::init(out.get_unchecked_mut((i, j)), val); } } } } } + #[inline] - fn $method_assign_statically_unchecked( - &mut self, rhs: &Matrix - ) where - SA: StorageMut, - SB: Storage - { + fn $method_assign_statically_unchecked(&mut self, rhs: &Matrix) + where R2: Dim, + C2: Dim, + SA: StorageMut, + SB: Storage { assert_eq!(self.shape(), rhs.shape(), "Matrix addition/subtraction dimensions mismatch."); // This is the most common case and should be deduced at compile-time. @@ -210,12 +205,12 @@ macro_rules! componentwise_binop_impl( } } + #[inline] - fn $method_assign_statically_unchecked_rhs( - &self, rhs: &mut Matrix - ) where - SB: StorageMut - { + fn $method_assign_statically_unchecked_rhs(&self, rhs: &mut Matrix) + where R2: Dim, + C2: Dim, + SB: StorageMut { assert_eq!(self.shape(), rhs.shape(), "Matrix addition/subtraction dimensions mismatch."); // This is the most common case and should be deduced at compile-time. @@ -250,20 +245,15 @@ macro_rules! componentwise_binop_impl( */ /// Equivalent to `self + rhs` but stores the result into `out` to avoid allocations. #[inline] - pub fn $method_to( - &self, - rhs: &Matrix, - out: &mut Matrix, R3, C3, SC> - ) where - SB: Storage, - SC: StorageMut, R3, C3>, - ShapeConstraint: - SameNumberOfRows + - SameNumberOfColumns + - SameNumberOfRows + - SameNumberOfColumns - { - self.$method_to_statically_unchecked(rhs, out) + pub fn $method_to(&self, + rhs: &Matrix, + out: &mut Matrix) + where SB: Storage, + SC: StorageMut, + ShapeConstraint: SameNumberOfRows + SameNumberOfColumns + + SameNumberOfRows + SameNumberOfColumns { + self.$method_to_statically_unchecked_uninit(Init, rhs, out) } } @@ -285,14 +275,13 @@ macro_rules! componentwise_binop_impl( } } - impl<'a, T, R1: Dim, C1: Dim, R2: Dim, C2: Dim, SA, SB> $Trait> for &'a Matrix - where - T: Scalar + $bound, - SA: Storage, - SB: Storage, - DefaultAllocator: SameShapeAllocator, - ShapeConstraint: SameNumberOfRows + SameNumberOfColumns - { + impl<'a, T, R1, C1, R2, C2, SA, SB> $Trait> for &'a Matrix + where R1: Dim, C1: Dim, R2: Dim, C2: Dim, + T: Scalar + $bound, + SA: Storage, + SB: Storage, + DefaultAllocator: SameShapeAllocator, + ShapeConstraint: SameNumberOfRows + SameNumberOfColumns { type Output = MatrixSum; #[inline] @@ -304,14 +293,13 @@ macro_rules! 
componentwise_binop_impl( } } - impl $Trait> for Matrix - where - T: Scalar + $bound, - SA: Storage, - SB: Storage, - DefaultAllocator: SameShapeAllocator, - ShapeConstraint: SameNumberOfRows + SameNumberOfColumns - { + impl $Trait> for Matrix + where R1: Dim, C1: Dim, R2: Dim, C2: Dim, + T: Scalar + $bound, + SA: Storage, + SB: Storage, + DefaultAllocator: SameShapeAllocator, + ShapeConstraint: SameNumberOfRows + SameNumberOfColumns { type Output = MatrixSum; #[inline] @@ -320,14 +308,13 @@ macro_rules! componentwise_binop_impl( } } - impl<'a, 'b, T, R1: Dim, C1: Dim, R2: Dim, C2: Dim, SA, SB> $Trait<&'b Matrix> for &'a Matrix - where - T: Scalar + $bound, - SA: Storage, - SB: Storage, - DefaultAllocator: SameShapeAllocator, - ShapeConstraint: SameNumberOfRows + SameNumberOfColumns - { + impl<'a, 'b, T, R1, C1, R2, C2, SA, SB> $Trait<&'b Matrix> for &'a Matrix + where R1: Dim, C1: Dim, R2: Dim, C2: Dim, + T: Scalar + $bound, + SA: Storage, + SB: Storage, + DefaultAllocator: SameShapeAllocator, + ShapeConstraint: SameNumberOfRows + SameNumberOfColumns { type Output = MatrixSum; #[inline] @@ -335,33 +322,33 @@ macro_rules! componentwise_binop_impl( let (nrows, ncols) = self.shape(); let nrows: SameShapeR = Dim::from_usize(nrows); let ncols: SameShapeC = Dim::from_usize(ncols); - let mut res = Matrix::new_uninitialized_generic(nrows, ncols); - - self.$method_to_statically_unchecked(rhs, &mut res); + let mut res = Matrix::uninit(nrows, ncols); + self.$method_to_statically_unchecked_uninit(Uninit, rhs, &mut res); + // SAFETY: the output has been initialized above. unsafe { res.assume_init() } } } - impl<'b, T, R1: Dim, C1: Dim, R2: Dim, C2: Dim, SA, SB> $TraitAssign<&'b Matrix> for Matrix - where - T: Scalar + $bound, - SA: StorageMut, - SB: Storage, - ShapeConstraint: SameNumberOfRows + SameNumberOfColumns - { + impl<'b, T, R1, C1, R2, C2, SA, SB> $TraitAssign<&'b Matrix> for Matrix + where R1: Dim, C1: Dim, R2: Dim, C2: Dim, + T: Scalar + $bound, + SA: StorageMut, + SB: Storage, + ShapeConstraint: SameNumberOfRows + SameNumberOfColumns { + #[inline] fn $method_assign(&mut self, rhs: &'b Matrix) { self.$method_assign_statically_unchecked(rhs) } } - impl $TraitAssign> for Matrix - where - T: Scalar + $bound, - SA: StorageMut, - SB: Storage, - ShapeConstraint: SameNumberOfRows + SameNumberOfColumns - { + impl $TraitAssign> for Matrix + where R1: Dim, C1: Dim, R2: Dim, C2: Dim, + T: Scalar + $bound, + SA: StorageMut, + SB: Storage, + ShapeConstraint: SameNumberOfRows + SameNumberOfColumns { + #[inline] fn $method_assign(&mut self, rhs: Matrix) { self.$method_assign(&rhs) @@ -372,10 +359,10 @@ macro_rules! 
componentwise_binop_impl( componentwise_binop_impl!(Add, add, ClosedAdd; AddAssign, add_assign, add_assign_statically_unchecked, add_assign_statically_unchecked_mut; - add_to, add_to_statically_unchecked); + add_to, add_to_statically_unchecked_uninit); componentwise_binop_impl!(Sub, sub, ClosedSub; SubAssign, sub_assign, sub_assign_statically_unchecked, sub_assign_statically_unchecked_mut; - sub_to, sub_to_statically_unchecked); + sub_to, sub_to_statically_unchecked_uninit); impl iter::Sum for OMatrix where @@ -574,9 +561,12 @@ where #[inline] fn mul(self, rhs: &'b Matrix) -> Self::Output { - let mut res = Matrix::new_uninitialized_generic(self.data.shape().0, rhs.data.shape().1); - let _ = self.mul_to(rhs, &mut res); - unsafe { res.assume_init() } + let mut res = Matrix::uninit(self.shape_generic().0, rhs.shape_generic().1); + unsafe { + // SAFETY: this is OK because status = Uninit && bevy == 0 + gemm_uninit(Uninit, &mut res, T::one(), self, rhs, T::zero()); + res.assume_init() + } } } @@ -634,14 +624,16 @@ where // TODO: this is too restrictive: // − we can't use `a *= b` when `a` is a mutable slice. // − we can't use `a *= b` when C2 is not equal to C1. -impl MulAssign> - for Matrix +impl MulAssign> for Matrix where + R1: Dim, + C1: Dim, + R2: Dim, T: Scalar + Zero + One + ClosedAdd + ClosedMul, SB: Storage, - SA: ContiguousStorageMut, + SA: StorageMut + IsContiguous + Clone, // TODO: get rid of the IsContiguous ShapeConstraint: AreMultipliable, - DefaultAllocator: Allocator + InnerAllocator, + DefaultAllocator: Allocator, { #[inline] fn mul_assign(&mut self, rhs: Matrix) { @@ -649,15 +641,17 @@ where } } -impl<'b, T, R1: Dim, C1: Dim, R2: Dim, SA, SB> MulAssign<&'b Matrix> - for Matrix +impl<'b, T, R1, C1, R2, SA, SB> MulAssign<&'b Matrix> for Matrix where + R1: Dim, + C1: Dim, + R2: Dim, T: Scalar + Zero + One + ClosedAdd + ClosedMul, SB: Storage, - SA: ContiguousStorageMut, + SA: StorageMut + IsContiguous + Clone, // TODO: get rid of the IsContiguous ShapeConstraint: AreMultipliable, // TODO: this is too restrictive. See comments for the non-ref version. - DefaultAllocator: Allocator + InnerAllocator, + DefaultAllocator: Allocator, { #[inline] fn mul_assign(&mut self, rhs: &'b Matrix) { @@ -680,8 +674,9 @@ where DefaultAllocator: Allocator, ShapeConstraint: SameNumberOfRows, { - let mut res = Matrix::new_uninitialized_generic(self.data.shape().1, rhs.data.shape().1); - self.tr_mul_to(rhs, &mut res); + let mut res = Matrix::uninit(self.shape_generic().1, rhs.shape_generic().1); + self.xx_mul_to_uninit(Uninit, rhs, &mut res, |a, b| a.dot(b)); + // SAFETY: this is OK because the result is now initialized. unsafe { res.assume_init() } } @@ -695,23 +690,26 @@ where DefaultAllocator: Allocator, ShapeConstraint: SameNumberOfRows, { - let mut res = Matrix::new_uninitialized_generic(self.data.shape().1, rhs.data.shape().1); - self.ad_mul_to(rhs, &mut res); + let mut res = Matrix::uninit(self.shape_generic().1, rhs.shape_generic().1); + self.xx_mul_to_uninit(Uninit, rhs, &mut res, |a, b| a.dotc(b)); + // SAFETY: this is OK because the result is now initialized. 
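// Illustrative sketch (not part of the patch): a minimal standalone model of
// the `InitStatus` idea used by the arithmetic routines above. One generic
// routine can write either into already-initialized storage (`Init`) or into
// `MaybeUninit` storage (`Uninit`). The real trait lives in the new `uninit`
// module and differs in detail (it is an `unsafe` trait with extra methods).
use std::mem::MaybeUninit;

trait InitStatus<T> {
    type Value;
    fn init(out: &mut Self::Value, t: T);
}

struct Init;
struct Uninit;

impl<T> InitStatus<T> for Init {
    type Value = T;
    fn init(out: &mut T, t: T) {
        *out = t; // plain overwrite: the previous value is dropped
    }
}

impl<T> InitStatus<T> for Uninit {
    type Value = MaybeUninit<T>;
    fn init(out: &mut MaybeUninit<T>, t: T) {
        *out = MaybeUninit::new(t); // first write: nothing to drop
    }
}

// A single routine serving both an `add_to`-style call (`Init` output) and a
// freshly allocated `a + b` (`Uninit` output).
fn write_sums<S: InitStatus<i32>>(out: &mut [S::Value], a: &[i32], b: &[i32]) {
    for ((o, x), y) in out.iter_mut().zip(a).zip(b) {
        S::init(o, x + y);
    }
}

fn init_status_demo() {
    let a = [1, 2];
    let b = [10, 20];
    let mut out = [0i32; 2];
    write_sums::<Init>(&mut out, &a, &b);
    assert_eq!(out, [11, 22]);
}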
unsafe { res.assume_init() } } #[inline(always)] - fn xx_mul_to( + fn xx_mul_to_uninit( &self, + status: Status, rhs: &Matrix, - out: &mut Matrix, R3, C3, SC>, + out: &mut Matrix, dot: impl Fn( &VectorSlice<'_, T, R1, SA::RStride, SA::CStride>, &VectorSlice<'_, T, R2, SB::RStride, SB::CStride>, ) -> T, ) where - SB: Storage, - SC: StorageMut, R3, C3>, + Status: InitStatus, + SB: RawStorage, + SC: RawStorageMut, ShapeConstraint: SameNumberOfRows + DimEq + DimEq, { let (nrows1, ncols1) = self.shape(); @@ -740,9 +738,8 @@ where for i in 0..ncols1 { for j in 0..ncols2 { let dot = dot(&self.column(i), &rhs.column(j)); - unsafe { - *out.get_unchecked_mut((i, j)) = MaybeUninit::new(dot); - } + let elt = unsafe { out.get_unchecked_mut((i, j)) }; + Status::init(elt, dot); } } } @@ -753,13 +750,13 @@ where pub fn tr_mul_to( &self, rhs: &Matrix, - out: &mut Matrix, R3, C3, SC>, + out: &mut Matrix, ) where SB: Storage, - SC: StorageMut, R3, C3>, + SC: StorageMut, ShapeConstraint: SameNumberOfRows + DimEq + DimEq, { - self.xx_mul_to(rhs, out, |a, b| a.dot(b)) + self.xx_mul_to_uninit(Init, rhs, out, |a, b| a.dot(b)) } /// Equivalent to `self.adjoint() * rhs` but stores the result into `out` to avoid @@ -768,31 +765,30 @@ where pub fn ad_mul_to( &self, rhs: &Matrix, - out: &mut Matrix, R3, C3, SC>, + out: &mut Matrix, ) where T: SimdComplexField, SB: Storage, - SC: StorageMut, R3, C3>, + SC: StorageMut, ShapeConstraint: SameNumberOfRows + DimEq + DimEq, { - self.xx_mul_to(rhs, out, |a, b| a.dotc(b)) + self.xx_mul_to_uninit(Init, rhs, out, |a, b| a.dotc(b)) } /// Equivalent to `self * rhs` but stores the result into `out` to avoid allocations. #[inline] - pub fn mul_to<'a, R2: Dim, C2: Dim, SB, R3: Dim, C3: Dim, SC>( + pub fn mul_to( &self, rhs: &Matrix, - out: &'a mut Matrix, R3, C3, SC>, - ) -> MatrixSliceMut<'a, T, R3, C3, SC::RStride, SC::CStride> - where + out: &mut Matrix, + ) where SB: Storage, - SC: StorageMut, R3, C3>, + SC: StorageMut, ShapeConstraint: SameNumberOfRows + SameNumberOfColumns + AreMultipliable, { - out.gemm_z(T::one(), self, rhs) + out.gemm(T::one(), self, rhs, T::zero()); } /// The kronecker product of two matrices (aka. tensor product of the corresponding linear @@ -809,34 +805,31 @@ where SB: Storage, DefaultAllocator: Allocator, DimProd>, { - let (nrows1, ncols1) = self.data.shape(); - let (nrows2, ncols2) = rhs.data.shape(); + let (nrows1, ncols1) = self.shape_generic(); + let (nrows2, ncols2) = rhs.shape_generic(); - let mut res = Matrix::new_uninitialized_generic(nrows1.mul(nrows2), ncols1.mul(ncols2)); - - { - let mut data_res = res.data.ptr_mut(); + let mut res = Matrix::uninit(nrows1.mul(nrows2), ncols1.mul(ncols2)); + let mut data_res = res.data.ptr_mut(); + unsafe { for j1 in 0..ncols1.value() { for j2 in 0..ncols2.value() { for i1 in 0..nrows1.value() { - unsafe { - let coeff = self.get_unchecked((i1, j1)).inlined_clone(); + let coeff = self.get_unchecked((i1, j1)).inlined_clone(); - for i2 in 0..nrows2.value() { - *data_res = MaybeUninit::new( - coeff.inlined_clone() - * rhs.get_unchecked((i2, j2)).inlined_clone(), - ); - data_res = data_res.offset(1); - } + for i2 in 0..nrows2.value() { + *data_res = MaybeUninit::new( + coeff.inlined_clone() * rhs.get_unchecked((i2, j2)).inlined_clone(), + ); + data_res = data_res.offset(1); } } } } - } - unsafe { res.assume_init() } + // SAFETY: the result matrix has been initialized by the loop above. 
+            res.assume_init()
+        }
     }
 }

diff --git a/src/base/properties.rs b/src/base/properties.rs
index 00333708..091d36ef 100644
--- a/src/base/properties.rs
+++ b/src/base/properties.rs
@@ -7,9 +7,10 @@ use simba::scalar::{ClosedAdd, ClosedMul, ComplexField, RealField};
 use crate::base::allocator::Allocator;
 use crate::base::dimension::{Dim, DimMin};
 use crate::base::storage::Storage;
-use crate::base::{DefaultAllocator, Matrix, SquareMatrix};
+use crate::base::{DefaultAllocator, Matrix, Scalar, SquareMatrix};
+use crate::RawStorage;

-impl<T, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
+impl<T, R: Dim, C: Dim, S: RawStorage<T, R, C>> Matrix<T, R, C, S> {
     /// The total number of elements of this matrix.
     ///
     /// # Examples:
diff --git a/src/base/scalar.rs b/src/base/scalar.rs
index 80a78594..db9e458d 100644
--- a/src/base/scalar.rs
+++ b/src/base/scalar.rs
@@ -1,38 +1,27 @@
+use std::any::Any;
 use std::any::TypeId;
 use std::fmt::Debug;

-/// The basic scalar trait for all structures of `nalgebra`.
+/// The basic scalar type for all structures of `nalgebra`.
 ///
-/// This is by design a very loose trait, and does not make any assumption on
-/// the algebraic properties of `Self`. It has various purposes and objectives:
-/// - Enforces simple and future-proof trait bounds.
-/// - Enables important optimizations for floating point types via specialization.
-/// - Makes debugging generic code possible in most circumstances.
-pub trait Scalar: 'static + Clone + Debug {
+/// This does not make any assumption on the algebraic properties of `Self`.
+pub trait Scalar: Clone + PartialEq + Debug + Any {
     #[inline]
-    /// Tests whether `Self` is the same as the type `T`.
+    /// Tests if `Self` is the same as the type `T`.
     ///
-    /// Typically used to test of `Self` is an `f32` or an `f64`, which is
-    /// important as it allows for specialization and certain optimizations to
-    /// be made.
-    ///
-    // If the need ever arose to get rid of the `'static` requirement, we could
-    // merely replace this method by two unsafe associated methods `is_f32` and
-    // `is_f64`.
+    /// Typically used to test whether `Self` is an `f32` or an `f64` with `T::is::<f64>()`.
     fn is<T: Scalar>() -> bool {
         TypeId::of::<Self>() == TypeId::of::<T>()
     }

-    /// Performance hack: Clone doesn't get inlined for Copy types in debug
-    /// mode, so make it inline anyway.
+    #[inline(always)]
+    /// Performance hack: Clone doesn't get inlined for Copy types in debug mode, so make it inline anyway.
     fn inlined_clone(&self) -> Self {
         self.clone()
     }
 }

-// Unfortunately, this blanket impl leads to many misleading compiler messages
-// telling you to implement Copy, even though Scalar is what's really needed.
-impl<T: Copy + Debug + 'static> Scalar for T {
+impl<T: Copy + PartialEq + Debug + Any> Scalar for T {
     #[inline(always)]
     fn inlined_clone(&self) -> T {
         *self
diff --git a/src/base/statistics.rs b/src/base/statistics.rs
index 84a6592a..ebf694a5 100644
--- a/src/base/statistics.rs
+++ b/src/base/statistics.rs
@@ -1,13 +1,12 @@
-use std::mem::MaybeUninit;
-
 use crate::allocator::Allocator;
-use crate::storage::Storage;
+use crate::storage::RawStorage;
 use crate::{Const, DefaultAllocator, Dim, Matrix, OVector, RowOVector, Scalar, VectorSlice, U1};
 use num::Zero;
 use simba::scalar::{ClosedAdd, Field, SupersetOf};
+use std::mem::MaybeUninit;

 /// # Folding on columns and rows
-impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
+impl<T: Scalar, R: Dim, C: Dim, S: RawStorage<T, R, C>> Matrix<T, R, C, S> {
     /// Returns a row vector where each element is the result of the application of `f` on the
     /// corresponding column of the original matrix.
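// Usage sketch (illustrative, not part of the patch): with the bounds above,
// any `Copy + PartialEq + Debug + 'static` type is a `Scalar` through the
// blanket impl, so matrices over custom POD types work out of the box.
use nalgebra::{Scalar, Vector2};

#[derive(Copy, Clone, PartialEq, Debug)]
struct Cents(u64);

fn scalar_demo() {
    let v = Vector2::new(Cents(1), Cents(2));
    assert_eq!(v[0], Cents(1));
    assert!(<Cents as Scalar>::is::<Cents>());
}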
#[inline] @@ -19,16 +18,18 @@ impl> Matrix { where DefaultAllocator: Allocator, { - let ncols = self.data.shape().1; - let mut res = RowOVector::new_uninitialized_generic(Const::<1>, ncols); + let ncols = self.shape_generic().1; + let mut res = Matrix::uninit(Const::<1>, ncols); for i in 0..ncols.value() { // TODO: avoid bound checking of column. + // Safety: all indices are in range. unsafe { *res.get_unchecked_mut((0, i)) = MaybeUninit::new(f(self.column(i))); } } + // Safety: res is now fully initialized. unsafe { res.assume_init() } } @@ -45,16 +46,18 @@ impl> Matrix { where DefaultAllocator: Allocator, { - let ncols = self.data.shape().1; - let mut res = Matrix::new_uninitialized_generic(ncols, Const::<1>); + let ncols = self.shape_generic().1; + let mut res = Matrix::uninit(ncols, Const::<1>); for i in 0..ncols.value() { // TODO: avoid bound checking of column. + // Safety: all indices are in range. unsafe { *res.vget_unchecked_mut(i) = MaybeUninit::new(f(self.column(i))); } } + // Safety: res is now fully initialized. unsafe { res.assume_init() } } @@ -63,22 +66,24 @@ impl> Matrix { #[must_use] pub fn compress_columns( &self, - mut init: OVector, - f: impl Fn(&mut OVector, VectorSlice), + init: OVector, + f: impl Fn(&mut OVector, VectorSlice<'_, T, R, S::RStride, S::CStride>), ) -> OVector where DefaultAllocator: Allocator, { + let mut res = init; + for i in 0..self.ncols() { - f(&mut init, self.column(i)) + f(&mut res, self.column(i)) } - init + res } } /// # Common statistics operations -impl> Matrix { +impl> Matrix { /* * * Sum computation. @@ -178,7 +183,7 @@ impl> Matrix { T: ClosedAdd + Zero, DefaultAllocator: Allocator, { - let nrows = self.data.shape().0; + let nrows = self.shape_generic().0; self.compress_columns(OVector::zeros_generic(nrows, Const::<1>), |out, col| { *out += col; }) @@ -281,10 +286,10 @@ impl> Matrix { T: Field + SupersetOf, DefaultAllocator: Allocator, { - let (nrows, ncols) = self.data.shape(); + let (nrows, ncols) = self.shape_generic(); let mut mean = self.column_mean(); - mean.apply(|e| -(e.inlined_clone() * e)); + mean.apply(|e| *e = -(e.inlined_clone() * e.inlined_clone())); let denom = T::one() / crate::convert::<_, T>(ncols.value() as f64); self.compress_columns(mean, |out, col| { @@ -389,7 +394,7 @@ impl> Matrix { T: Field + SupersetOf, DefaultAllocator: Allocator, { - let (nrows, ncols) = self.data.shape(); + let (nrows, ncols) = self.shape_generic(); let denom = T::one() / crate::convert::<_, T>(ncols.value() as f64); self.compress_columns(OVector::zeros_generic(nrows, Const::<1>), |out, col| { out.axpy(denom.inlined_clone(), &col, T::one()) diff --git a/src/base/storage.rs b/src/base/storage.rs index 1f06a11e..7ef7e152 100644 --- a/src/base/storage.rs +++ b/src/base/storage.rs @@ -2,27 +2,32 @@ use std::ptr; -use crate::base::allocator::{Allocator, InnerAllocator, SameShapeC, SameShapeR}; +use crate::base::allocator::{Allocator, SameShapeC, SameShapeR}; use crate::base::default_allocator::DefaultAllocator; use crate::base::dimension::{Dim, U1}; -use crate::base::Owned; +use crate::base::Scalar; /* * Aliases for allocation results. */ +/// The data storage for the sum of two matrices with dimensions `(R1, C1)` and `(R2, C2)`. +pub type SameShapeStorage = + , SameShapeC>>::Buffer; // TODO: better name than Owned ? /// The owned data storage that can be allocated from `S`. -pub type InnerOwned = >::Buffer; +pub type Owned = >::Buffer; -/// The data storage for the sum of two matrices with dimensions `(R1, C1)` and `(R2, C2)`. 
-pub type SameShapeStorage = Owned, SameShapeC>; +/// The owned data storage that can be allocated from `S`. +pub type OwnedUninit = >::BufferUninit; /// The row-stride of the owned data storage for a buffer of dimension `(R, C)`. -pub type RStride = as Storage>::RStride; +pub type RStride = + <>::Buffer as RawStorage>::RStride; /// The column-stride of the owned data storage for a buffer of dimension `(R, C)`. -pub type CStride = as Storage>::CStride; +pub type CStride = + <>::Buffer as RawStorage>::CStride; /// The trait shared by all matrix data storage. /// @@ -33,7 +38,7 @@ pub type CStride = as Storage>::CStr /// should **not** allow the user to modify the size of the underlying buffer with safe methods /// (for example the `VecStorage::data_mut` method is unsafe because the user could change the /// vector's size so that it no longer contains enough elements: this will lead to UB. -pub unsafe trait Storage: Sized { +pub unsafe trait RawStorage: Sized { /// The static stride of this storage's rows. type RStride: Dim; @@ -118,17 +123,17 @@ pub unsafe trait Storage: Sized { /// /// Call the safe alternative `matrix.as_slice()` instead. unsafe fn as_slice_unchecked(&self) -> &[T]; +} +pub unsafe trait Storage: RawStorage { /// Builds a matrix data storage that does not contain any reference. fn into_owned(self) -> Owned where - T: Clone, DefaultAllocator: Allocator; /// Clones this data storage to one that does not contain any reference. fn clone_owned(&self) -> Owned where - T: Clone, DefaultAllocator: Allocator; } @@ -137,7 +142,7 @@ pub unsafe trait Storage: Sized { /// Note that a mutable access does not mean that the matrix owns its data. For example, a mutable /// matrix slice can provide mutable access to its elements even if it does not own its data (it /// contains only an internal reference to them). -pub unsafe trait StorageMut: Storage { +pub unsafe trait RawStorageMut: RawStorage { /// The matrix mutable data pointer. fn ptr_mut(&mut self) -> *mut T; @@ -212,40 +217,37 @@ pub unsafe trait StorageMut: Storage { unsafe fn as_mut_slice_unchecked(&mut self) -> &mut [T]; } -/// A matrix storage that is stored contiguously in memory. -/// -/// The storage requirement means that for any value of `i` in `[0, nrows * ncols - 1]`, the value -/// `.get_unchecked_linear` returns one of the matrix component. This trait is unsafe because -/// failing to comply to this may cause Undefined Behaviors. -pub unsafe trait ContiguousStorage: Storage { - /// Converts this data storage to a contiguous slice. - fn as_slice(&self) -> &[T] { - // SAFETY: this is safe because this trait guarantees the fact - // that the data is stored contiguously. - unsafe { self.as_slice_unchecked() } - } +pub unsafe trait StorageMut: + Storage + RawStorageMut +{ } -/// A mutable matrix storage that is stored contiguously in memory. +unsafe impl StorageMut for S +where + R: Dim, + C: Dim, + S: Storage + RawStorageMut, +{ +} + +/// Marker trait indicating that a storage is stored contiguously in memory. /// /// The storage requirement means that for any value of `i` in `[0, nrows * ncols - 1]`, the value /// `.get_unchecked_linear` returns one of the matrix component. This trait is unsafe because /// failing to comply to this may cause Undefined Behaviors. -pub unsafe trait ContiguousStorageMut: - ContiguousStorage + StorageMut -{ - /// Converts this data storage to a contiguous mutable slice. 
- fn as_mut_slice(&mut self) -> &mut [T] { - // SAFETY: this is safe because this trait guarantees the fact - // that the data is stored contiguously. - unsafe { self.as_mut_slice_unchecked() } - } -} +pub unsafe trait IsContiguous {} /// A matrix storage that can be reshaped in-place. -pub trait ReshapableStorage: Storage { +pub trait ReshapableStorage: RawStorage +where + T: Scalar, + R1: Dim, + C1: Dim, + R2: Dim, + C2: Dim, +{ /// The reshaped storage type. - type Output: Storage; + type Output: RawStorage; /// Reshapes the storage into the output storage type. fn reshape_generic(self, nrows: R2, ncols: C2) -> Self::Output; diff --git a/src/base/swizzle.rs b/src/base/swizzle.rs index 0c471301..6ed05d81 100644 --- a/src/base/swizzle.rs +++ b/src/base/swizzle.rs @@ -1,5 +1,5 @@ -use crate::base::{DimName, ToTypenum, Vector, Vector2, Vector3}; -use crate::storage::Storage; +use crate::base::{DimName, Scalar, ToTypenum, Vector, Vector2, Vector3}; +use crate::storage::RawStorage; use typenum::{self, Cmp, Greater}; macro_rules! impl_swizzle { @@ -11,7 +11,7 @@ macro_rules! impl_swizzle { #[must_use] pub fn $name(&self) -> $Result where D::Typenum: Cmp { - $Result::new($(self[$i].clone()),*) + $Result::new($(self[$i].inlined_clone()),*) } )* )* @@ -19,7 +19,7 @@ macro_rules! impl_swizzle { } /// # Swizzling -impl> Vector +impl> Vector where D: DimName + ToTypenum, { diff --git a/src/base/uninit.rs b/src/base/uninit.rs new file mode 100644 index 00000000..7fc5f84e --- /dev/null +++ b/src/base/uninit.rs @@ -0,0 +1,76 @@ +use std::mem::MaybeUninit; + +// # Safety +// This trait must not be implemented outside of this crate. +pub unsafe trait InitStatus: Copy { + type Value; + fn init(out: &mut Self::Value, t: T); + unsafe fn assume_init_ref(t: &Self::Value) -> &T; + unsafe fn assume_init_mut(t: &mut Self::Value) -> &mut T; +} + +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub struct Init; +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub struct Uninit; +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub struct Initialized(pub Status); + +unsafe impl InitStatus for Init { + type Value = T; + + #[inline(always)] + fn init(out: &mut T, t: T) { + *out = t; + } + + #[inline(always)] + unsafe fn assume_init_ref(t: &T) -> &T { + t + } + + #[inline(always)] + unsafe fn assume_init_mut(t: &mut T) -> &mut T { + t + } +} + +unsafe impl InitStatus for Uninit { + type Value = MaybeUninit; + + #[inline(always)] + fn init(out: &mut MaybeUninit, t: T) { + *out = MaybeUninit::new(t); + } + + #[inline(always)] + unsafe fn assume_init_ref(t: &MaybeUninit) -> &T { + std::mem::transmute(t.as_ptr()) // TODO: use t.assume_init_ref() + } + + #[inline(always)] + unsafe fn assume_init_mut(t: &mut MaybeUninit) -> &mut T { + std::mem::transmute(t.as_mut_ptr()) // TODO: use t.assume_init_mut() + } +} + +unsafe impl> InitStatus for Initialized { + type Value = Status::Value; + + #[inline(always)] + fn init(out: &mut Status::Value, t: T) { + unsafe { + *Status::assume_init_mut(out) = t; + } + } + + #[inline(always)] + unsafe fn assume_init_ref(t: &Status::Value) -> &T { + Status::assume_init_ref(t) + } + + #[inline(always)] + unsafe fn assume_init_mut(t: &mut Status::Value) -> &mut T { + Status::assume_init_mut(t) + } +} diff --git a/src/base/unit.rs b/src/base/unit.rs index 73fcd6dd..fa869c09 100644 --- a/src/base/unit.rs +++ b/src/base/unit.rs @@ -10,7 +10,7 @@ use abomonation::Abomonation; use crate::allocator::Allocator; use crate::base::DefaultAllocator; -use crate::storage::{InnerOwned, Storage}; +use 
crate::storage::RawStorage; use crate::{Dim, Matrix, OMatrix, RealField, Scalar, SimdComplexField, SimdRealField}; /// A wrapper that ensures the underlying algebraic entity has a unit norm. @@ -113,10 +113,10 @@ mod rkyv_impl { impl PartialEq for Unit> where - T: PartialEq, + T: Scalar + PartialEq, R: Dim, C: Dim, - S: Storage, + S: RawStorage, { #[inline] fn eq(&self, rhs: &Self) -> bool { @@ -126,10 +126,10 @@ where impl Eq for Unit> where - T: Eq, + T: Scalar + Eq, R: Dim, C: Dim, - S: Storage, + S: RawStorage, { } @@ -228,7 +228,7 @@ impl Unit { /// Wraps the given reference, assuming it is already normalized. #[inline] pub fn from_ref_unchecked(value: &T) -> &Self { - unsafe { &*(value as *const _ as *const _) } + unsafe { &*(value as *const T as *const Self) } } /// Retrieves the underlying value. @@ -331,7 +331,7 @@ impl Deref for Unit { #[inline] fn deref(&self) -> &T { - unsafe { &*(self as *const _ as *const T) } + unsafe { &*(self as *const Self as *const T) } } } @@ -344,7 +344,6 @@ where T: From<[::Element; 2]>, T::Element: Scalar, DefaultAllocator: Allocator + Allocator, - InnerOwned: Clone, { #[inline] fn from(arr: [Unit>; 2]) -> Self { @@ -361,7 +360,6 @@ where T: From<[::Element; 4]>, T::Element: Scalar, DefaultAllocator: Allocator + Allocator, - InnerOwned: Clone, { #[inline] fn from(arr: [Unit>; 4]) -> Self { @@ -380,7 +378,6 @@ where T: From<[::Element; 8]>, T::Element: Scalar, DefaultAllocator: Allocator + Allocator, - InnerOwned: Clone, { #[inline] fn from(arr: [Unit>; 8]) -> Self { @@ -403,7 +400,6 @@ where T: From<[::Element; 16]>, T::Element: Scalar, DefaultAllocator: Allocator + Allocator, - InnerOwned: Clone, { #[inline] fn from(arr: [Unit>; 16]) -> Self { diff --git a/src/base/vec_storage.rs b/src/base/vec_storage.rs index 494e2090..f5b0b01c 100644 --- a/src/base/vec_storage.rs +++ b/src/base/vec_storage.rs @@ -4,14 +4,12 @@ use std::io::{Result as IOResult, Write}; #[cfg(all(feature = "alloc", not(feature = "std")))] use alloc::vec::Vec; -use crate::allocator::InnerAllocator; +use crate::base::allocator::Allocator; use crate::base::constraint::{SameNumberOfRows, ShapeConstraint}; use crate::base::default_allocator::DefaultAllocator; use crate::base::dimension::{Dim, DimName, Dynamic, U1}; -use crate::base::storage::{ - ContiguousStorage, ContiguousStorageMut, ReshapableStorage, Storage, StorageMut, -}; -use crate::base::{Owned, Vector}; +use crate::base::storage::{IsContiguous, Owned, RawStorage, RawStorageMut, ReshapableStorage}; +use crate::base::{Scalar, Vector}; #[cfg(feature = "serde-serialize-no-std")] use serde::{ @@ -19,20 +17,22 @@ use serde::{ ser::{Serialize, Serializer}, }; +use crate::Storage; #[cfg(feature = "abomonation-serialize")] use abomonation::Abomonation; /* * - * Storage. + * RawStorage. * */ /// A Vec-based matrix data storage. It may be dynamically-sized. +#[repr(C)] #[derive(Eq, Debug, Clone, PartialEq)] pub struct VecStorage { data: Vec, - pub(crate) nrows: R, - pub(crate) ncols: C, + nrows: R, + ncols: C, } #[cfg(feature = "serde-serialize")] @@ -142,6 +142,18 @@ impl VecStorage { pub fn is_empty(&self) -> bool { self.len() == 0 } + + /// A slice containing all the components stored in this storage in column-major order. + #[inline] + pub fn as_slice(&self) -> &[T] { + &self.data[..] + } + + /// A mutable slice containing all the components stored in this storage in column-major order. + #[inline] + pub fn as_mut_slice(&mut self) -> &mut [T] { + &mut self.data[..] 
+ } } impl From> for Vec { @@ -156,10 +168,7 @@ impl From> for Vec { * Dynamic − Dynamic * */ -unsafe impl Storage for VecStorage -where - DefaultAllocator: InnerAllocator, -{ +unsafe impl RawStorage for VecStorage { type RStride = U1; type CStride = Dynamic; @@ -183,29 +192,34 @@ where true } - #[inline] - fn into_owned(self) -> Owned { - Owned(self) - } - - #[inline] - fn clone_owned(&self) -> Owned - where - T: Clone, - { - Owned(self.clone()) - } - #[inline] unsafe fn as_slice_unchecked(&self) -> &[T] { &self.data } } -unsafe impl Storage for VecStorage +unsafe impl Storage for VecStorage where - DefaultAllocator: InnerAllocator, + DefaultAllocator: Allocator, { + #[inline] + fn into_owned(self) -> Owned + where + DefaultAllocator: Allocator, + { + self + } + + #[inline] + fn clone_owned(&self) -> Owned + where + DefaultAllocator: Allocator, + { + self.clone() + } +} + +unsafe impl RawStorage for VecStorage { type RStride = U1; type CStride = R; @@ -229,34 +243,39 @@ where true } - #[inline] - fn into_owned(self) -> Owned { - Owned(self) - } - - #[inline] - fn clone_owned(&self) -> Owned - where - T: Clone, - { - Owned(self.clone()) - } - #[inline] unsafe fn as_slice_unchecked(&self) -> &[T] { &self.data } } +unsafe impl Storage for VecStorage +where + DefaultAllocator: Allocator, +{ + #[inline] + fn into_owned(self) -> Owned + where + DefaultAllocator: Allocator, + { + self + } + + #[inline] + fn clone_owned(&self) -> Owned + where + DefaultAllocator: Allocator, + { + self.clone() + } +} + /* * - * StorageMut, ContiguousStorage. + * RawStorageMut, ContiguousStorage. * */ -unsafe impl StorageMut for VecStorage -where - DefaultAllocator: InnerAllocator, -{ +unsafe impl RawStorageMut for VecStorage { #[inline] fn ptr_mut(&mut self) -> *mut T { self.data.as_mut_ptr() @@ -268,18 +287,13 @@ where } } -unsafe impl ContiguousStorage for VecStorage where - DefaultAllocator: InnerAllocator -{ -} +unsafe impl IsContiguous for VecStorage {} -unsafe impl ContiguousStorageMut for VecStorage where - DefaultAllocator: InnerAllocator -{ -} - -impl ReshapableStorage - for VecStorage +impl ReshapableStorage for VecStorage +where + T: Scalar, + C1: Dim, + C2: Dim, { type Output = VecStorage; @@ -293,8 +307,11 @@ impl ReshapableStorage } } -impl ReshapableStorage - for VecStorage +impl ReshapableStorage for VecStorage +where + T: Scalar, + C1: Dim, + R2: DimName, { type Output = VecStorage; @@ -308,10 +325,7 @@ impl ReshapableStorage } } -unsafe impl StorageMut for VecStorage -where - DefaultAllocator: InnerAllocator, -{ +unsafe impl RawStorageMut for VecStorage { #[inline] fn ptr_mut(&mut self) -> *mut T { self.data.as_mut_ptr() @@ -323,8 +337,11 @@ where } } -impl ReshapableStorage - for VecStorage +impl ReshapableStorage for VecStorage +where + T: Scalar, + R1: DimName, + C2: Dim, { type Output = VecStorage; @@ -338,8 +355,11 @@ impl ReshapableStorage } } -impl ReshapableStorage - for VecStorage +impl ReshapableStorage for VecStorage +where + T: Scalar, + R1: DimName, + R2: DimName, { type Output = VecStorage; @@ -368,16 +388,6 @@ impl Abomonation for VecStorage { } } -unsafe impl ContiguousStorage for VecStorage where - DefaultAllocator: InnerAllocator -{ -} - -unsafe impl ContiguousStorageMut for VecStorage where - DefaultAllocator: InnerAllocator -{ -} - impl Extend for VecStorage { /// Extends the number of columns of the `VecStorage` with elements /// from the given iterator. 
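The `VecStorage` hunks above are the clearest instance of the storage split this patch introduces: `RawStorage`/`RawStorageMut` carry only shape, strides, and element access, `Storage` layers the allocator-dependent `into_owned`/`clone_owned` on top, and contiguity shrinks to the bare `IsContiguous` marker. The standalone sketch below mirrors that layering in miniature; the trait and method names follow this diff, but the simplified `VecStorage` shape and the `total_len` helper are illustrative only.

```
// Minimal mirror of the RawStorage/Storage split, assuming a column-major Vec buffer.
pub unsafe trait RawStorage<T>: Sized {
    fn shape(&self) -> (usize, usize);
    fn get(&self, irow: usize, icol: usize) -> &T;
}

// Ownership-related methods live in a separate trait, as in this patch.
pub unsafe trait Storage<T: Clone>: RawStorage<T> {
    fn clone_owned(&self) -> Vec<T>;
}

// Contiguity is now a marker, not a trait with methods.
pub unsafe trait IsContiguous {}

pub struct VecStorage<T> {
    data: Vec<T>,
    nrows: usize,
    ncols: usize,
}

unsafe impl<T> RawStorage<T> for VecStorage<T> {
    fn shape(&self) -> (usize, usize) {
        (self.nrows, self.ncols)
    }

    fn get(&self, irow: usize, icol: usize) -> &T {
        // Column-major indexing, like the real VecStorage.
        &self.data[icol * self.nrows + irow]
    }
}

unsafe impl<T: Clone> Storage<T> for VecStorage<T> {
    fn clone_owned(&self) -> Vec<T> {
        self.data.clone()
    }
}

unsafe impl<T> IsContiguous for VecStorage<T> {}

// Read-only generic code can now be bounded on RawStorage alone:
// no Clone bound on T, no DefaultAllocator bound at all.
fn total_len<T, S: RawStorage<T>>(s: &S) -> usize {
    let (nrows, ncols) = s.shape();
    nrows * ncols
}

fn main() {
    let s = VecStorage { data: vec![1, 2, 3, 4, 5, 6], nrows: 2, ncols: 3 };
    assert_eq!(total_len(&s), 6);
    assert_eq!(*s.get(1, 2), 6); // row 1 of column 2, column-major
}
```

This is the payoff visible in the hunks above: the `RawStorage`/`RawStorageMut` impls for `VecStorage` no longer need any `DefaultAllocator` bound, and only the `Storage` impls do.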
@@ -407,9 +417,12 @@ impl<'a, T: 'a + Copy, R: Dim> Extend<&'a T> for VecStorage { } } -impl Extend> for VecStorage +impl Extend> for VecStorage where - SV: Storage, + T: Scalar, + R: Dim, + RV: Dim, + SV: RawStorage, ShapeConstraint: SameNumberOfRows, { /// Extends the number of columns of the `VecStorage` with vectors diff --git a/src/debug/random_orthogonal.rs b/src/debug/random_orthogonal.rs index 2cfbec26..c9684238 100644 --- a/src/debug/random_orthogonal.rs +++ b/src/debug/random_orthogonal.rs @@ -1,52 +1,24 @@ -use std::fmt; - +#[cfg(feature = "arbitrary")] +use crate::base::storage::Owned; #[cfg(feature = "arbitrary")] use quickcheck::{Arbitrary, Gen}; use crate::base::allocator::Allocator; -use crate::base::dimension::{Dim, DimName, Dynamic}; +use crate::base::dimension::{Dim, Dynamic}; +use crate::base::Scalar; use crate::base::{DefaultAllocator, OMatrix}; use crate::linalg::givens::GivensRotation; -use crate::storage::Owned; use simba::scalar::ComplexField; /// A random orthogonal matrix. -pub struct RandomOrthogonal +#[derive(Clone, Debug)] +pub struct RandomOrthogonal where DefaultAllocator: Allocator, { m: OMatrix, } -impl Copy for RandomOrthogonal -where - DefaultAllocator: Allocator, - Owned: Copy, -{ -} - -impl Clone for RandomOrthogonal -where - DefaultAllocator: Allocator, - Owned: Clone, -{ - fn clone(&self) -> Self { - Self { m: self.m.clone() } - } -} - -impl fmt::Debug for RandomOrthogonal -where - DefaultAllocator: Allocator, - Owned: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("RandomOrthogonal") - .field("m", &self.m) - .finish() - } -} - impl RandomOrthogonal where DefaultAllocator: Allocator, diff --git a/src/debug/random_sdp.rs b/src/debug/random_sdp.rs index 3e119946..a915f2fc 100644 --- a/src/debug/random_sdp.rs +++ b/src/debug/random_sdp.rs @@ -1,50 +1,25 @@ -use std::fmt; - +#[cfg(feature = "arbitrary")] +use crate::base::storage::Owned; #[cfg(feature = "arbitrary")] use quickcheck::{Arbitrary, Gen}; use crate::base::allocator::Allocator; use crate::base::dimension::{Dim, Dynamic}; -use crate::base::{DefaultAllocator, OMatrix, Owned}; +use crate::base::Scalar; +use crate::base::{DefaultAllocator, OMatrix}; use simba::scalar::ComplexField; use crate::debug::RandomOrthogonal; /// A random, well-conditioned, symmetric definite-positive matrix. 
-pub struct RandomSDP +#[derive(Clone, Debug)] +pub struct RandomSDP where DefaultAllocator: Allocator, { m: OMatrix, } -impl Copy for RandomSDP -where - DefaultAllocator: Allocator, - Owned: Copy, -{ -} - -impl Clone for RandomSDP -where - DefaultAllocator: Allocator, - Owned: Clone, -{ - fn clone(&self) -> Self { - Self { m: self.m.clone() } - } -} - -impl fmt::Debug for RandomSDP -where - DefaultAllocator: Allocator, - Owned: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("RandomSDP").field("m", &self.m).finish() - } -} - impl RandomSDP where DefaultAllocator: Allocator, diff --git a/src/geometry/dual_quaternion.rs b/src/geometry/dual_quaternion.rs index 6ad5bef5..6dd8936d 100644 --- a/src/geometry/dual_quaternion.rs +++ b/src/geometry/dual_quaternion.rs @@ -2,15 +2,15 @@ #![allow(clippy::op_ref)] use crate::{ - Isometry3, Matrix4, Normed, OVector, Point3, Quaternion, SimdRealField, Translation3, Unit, - UnitQuaternion, Vector3, Zero, U8, + Isometry3, Matrix4, Normed, OVector, Point3, Quaternion, Scalar, SimdRealField, Translation3, + Unit, UnitQuaternion, Vector3, Zero, U8, }; use approx::{AbsDiffEq, RelativeEq, UlpsEq}; #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Deserializer, Serialize, Serializer}; use std::fmt; -use simba::scalar::RealField; +use simba::scalar::{ClosedNeg, RealField}; /// A dual quaternion. /// @@ -46,16 +46,16 @@ pub struct DualQuaternion { pub dual: Quaternion, } -impl Eq for DualQuaternion {} +impl Eq for DualQuaternion {} -impl PartialEq for DualQuaternion { +impl PartialEq for DualQuaternion { #[inline] fn eq(&self, right: &Self) -> bool { self.real == right.real && self.dual == right.dual } } -impl Default for DualQuaternion { +impl Default for DualQuaternion { fn default() -> Self { Self { real: Quaternion::default(), @@ -267,7 +267,10 @@ where } #[cfg(feature = "serde-serialize-no-std")] -impl Serialize for DualQuaternion { +impl Serialize for DualQuaternion +where + T: Serialize, +{ fn serialize(&self, serializer: S) -> Result<::Ok, ::Error> where S: Serializer, @@ -277,7 +280,10 @@ impl Serialize for DualQuaternion { } #[cfg(feature = "serde-serialize-no-std")] -impl<'a, T: Deserialize<'a>> Deserialize<'a> for DualQuaternion { +impl<'a, T: SimdRealField> Deserialize<'a> for DualQuaternion +where + T: Deserialize<'a>, +{ fn deserialize(deserializer: Des) -> Result where Des: Deserializer<'a>, @@ -293,14 +299,9 @@ impl<'a, T: Deserialize<'a>> Deserialize<'a> for DualQuaternion { } } -impl DualQuaternion { - // TODO: Cloning shouldn't be necessary. - // TODO: rename into `into_vector` to appease clippy. - fn to_vector(self) -> OVector - where - T: Clone, - { - (self.as_ref().clone()).into() +impl DualQuaternion { + fn to_vector(self) -> OVector { + (*self.as_ref()).into() } } @@ -356,14 +357,14 @@ impl> UlpsEq for DualQuaternion { /// A unit quaternions. May be used to represent a rotation followed by a translation. pub type UnitDualQuaternion = Unit>; -impl PartialEq for UnitDualQuaternion { +impl PartialEq for UnitDualQuaternion { #[inline] fn eq(&self, rhs: &Self) -> bool { self.as_ref().eq(rhs.as_ref()) } } -impl Eq for UnitDualQuaternion {} +impl Eq for UnitDualQuaternion {} impl Normed for DualQuaternion { type Norm = T::SimdRealField; @@ -391,7 +392,10 @@ impl Normed for DualQuaternion { } } -impl UnitDualQuaternion { +impl UnitDualQuaternion +where + T::Element: SimdRealField, +{ /// The underlying dual quaternion. /// /// Same as `self.as_ref()`. 
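The hunks around this point fold the two `UnitDualQuaternion` impl blocks into one and, a little further down, relax the `sclerp`/`try_sclerp` bounds from `T: RealField + RelativeEq` to plain `T: RealField` (which already implies `RelativeEq`). Below is a short usage sketch of the API being touched; it relies on constructors and accessors (`from_parts`, `rotation`, `translation`) that already exist in the public API, not on anything this diff adds:

```
use nalgebra::{Translation3, UnitDualQuaternion, UnitQuaternion, Vector3};

fn main() {
    // The identity screw motion.
    let a = UnitDualQuaternion::<f64>::identity();

    // A quarter turn about Z combined with a unit translation along X.
    let b = UnitDualQuaternion::from_parts(
        Translation3::new(1.0, 0.0, 0.0),
        UnitQuaternion::from_axis_angle(&Vector3::z_axis(), std::f64::consts::FRAC_PI_2),
    );

    // Screw linear interpolation halfway between the two motions.
    // After this patch, `T: RealField` is the only scalar bound required.
    let halfway = a.sclerp(&b, 0.5);
    println!("rotation angle: {}", halfway.rotation().angle());
    println!("translation:    {}", halfway.translation().vector);
}
```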
@@ -410,12 +414,7 @@ impl UnitDualQuaternion { pub fn dual_quaternion(&self) -> &DualQuaternion { self.as_ref() } -} -impl UnitDualQuaternion -where - T::Element: SimdRealField, -{ /// Compute the conjugate of this unit quaternion. /// /// # Example @@ -617,7 +616,7 @@ where #[must_use] pub fn sclerp(&self, other: &Self, t: T) -> Self where - T: RealField + RelativeEq, + T: RealField, { self.try_sclerp(other, t, T::default_epsilon()) .expect("DualQuaternion sclerp: ambiguous configuration.") @@ -637,7 +636,7 @@ where #[must_use] pub fn try_sclerp(&self, other: &Self, t: T, epsilon: T) -> Option where - T: RealField + RelativeEq, + T: RealField, { let two = T::one() + T::one(); let half = T::one() / two; diff --git a/src/geometry/dual_quaternion_construction.rs b/src/geometry/dual_quaternion_construction.rs index d692d781..ea4c7ee2 100644 --- a/src/geometry/dual_quaternion_construction.rs +++ b/src/geometry/dual_quaternion_construction.rs @@ -1,5 +1,5 @@ use crate::{ - DualQuaternion, Isometry3, Quaternion, SimdRealField, Translation3, UnitDualQuaternion, + DualQuaternion, Isometry3, Quaternion, Scalar, SimdRealField, Translation3, UnitDualQuaternion, UnitQuaternion, }; use num::{One, Zero}; @@ -7,7 +7,7 @@ use num::{One, Zero}; use quickcheck::{Arbitrary, Gen}; use simba::scalar::SupersetOf; -impl DualQuaternion { +impl DualQuaternion { /// Creates a dual quaternion from its rotation and translation components. /// /// # Example @@ -60,7 +60,7 @@ impl DualQuaternion { /// let q2 = q.cast::(); /// assert_eq!(q2, DualQuaternion::from_real(Quaternion::new(1.0f32, 2.0, 3.0, 4.0))); /// ``` - pub fn cast(self) -> DualQuaternion + pub fn cast(self) -> DualQuaternion where DualQuaternion: SupersetOf, { @@ -156,7 +156,7 @@ impl UnitDualQuaternion { /// let q2 = q.cast::(); /// assert_eq!(q2, UnitDualQuaternion::::identity()); /// ``` - pub fn cast(self) -> UnitDualQuaternion + pub fn cast(self) -> UnitDualQuaternion where UnitDualQuaternion: SupersetOf, { diff --git a/src/geometry/dual_quaternion_conversion.rs b/src/geometry/dual_quaternion_conversion.rs index 2afffe26..94ef9e97 100644 --- a/src/geometry/dual_quaternion_conversion.rs +++ b/src/geometry/dual_quaternion_conversion.rs @@ -24,7 +24,8 @@ use crate::geometry::{ impl SubsetOf> for DualQuaternion where - T2: SupersetOf, + T1: SimdRealField, + T2: SimdRealField + SupersetOf, { #[inline] fn to_superset(&self) -> DualQuaternion { @@ -48,7 +49,8 @@ where impl SubsetOf> for UnitDualQuaternion where - T2: SupersetOf, + T1: SimdRealField, + T2: SimdRealField + SupersetOf, { #[inline] fn to_superset(&self) -> UnitDualQuaternion { diff --git a/src/geometry/dual_quaternion_ops.rs b/src/geometry/dual_quaternion_ops.rs index 151b2e05..2a1527ec 100644 --- a/src/geometry/dual_quaternion_ops.rs +++ b/src/geometry/dual_quaternion_ops.rs @@ -56,21 +56,21 @@ use std::ops::{ Add, AddAssign, Div, DivAssign, Index, IndexMut, Mul, MulAssign, Neg, Sub, SubAssign, }; -impl AsRef<[T; 8]> for DualQuaternion { +impl AsRef<[T; 8]> for DualQuaternion { #[inline] fn as_ref(&self) -> &[T; 8] { - unsafe { &*(self as *const _ as *const _) } + unsafe { &*(self as *const Self as *const [T; 8]) } } } -impl AsMut<[T; 8]> for DualQuaternion { +impl AsMut<[T; 8]> for DualQuaternion { #[inline] fn as_mut(&mut self) -> &mut [T; 8] { - unsafe { &mut *(self as *mut _ as *mut _) } + unsafe { &mut *(self as *mut Self as *mut [T; 8]) } } } -impl Index for DualQuaternion { +impl Index for DualQuaternion { type Output = T; #[inline] @@ -79,7 +79,7 @@ impl Index for DualQuaternion { } } 
-impl IndexMut for DualQuaternion { +impl IndexMut for DualQuaternion { #[inline] fn index_mut(&mut self, i: usize) -> &mut T { &mut self.as_mut()[i] diff --git a/src/geometry/isometry.rs b/src/geometry/isometry.rs index 74e2f05d..f8e63d07 100755 --- a/src/geometry/isometry.rs +++ b/src/geometry/isometry.rs @@ -15,7 +15,7 @@ use simba::simd::SimdRealField; use crate::base::allocator::Allocator; use crate::base::dimension::{DimNameAdd, DimNameSum, U1}; -use crate::base::storage::InnerOwned; +use crate::base::storage::Owned; use crate::base::{Const, DefaultAllocator, OMatrix, SVector, Scalar, Unit}; use crate::geometry::{AbstractRotation, Point, Translation}; @@ -53,6 +53,7 @@ use crate::geometry::{AbstractRotation, Point, Translation}; /// # Conversion to a matrix /// * [Conversion to a matrix `to_matrix`…](#conversion-to-a-matrix) /// +#[repr(C)] #[derive(Debug)] #[cfg_attr(feature = "serde-serialize-no-std", derive(Serialize, Deserialize))] #[cfg_attr( @@ -79,6 +80,7 @@ pub struct Isometry { #[cfg(feature = "abomonation-serialize")] impl Abomonation for Isometry where + T: SimdRealField, R: Abomonation, Translation: Abomonation, { @@ -104,7 +106,10 @@ mod rkyv_impl { use crate::{base::Scalar, geometry::Translation}; use rkyv::{offset_of, project_struct, Archive, Deserialize, Fallible, Serialize}; - impl Archive for Isometry { + impl Archive for Isometry + where + T::Archived: Scalar, + { type Archived = Isometry; type Resolver = (R::Resolver, as Archive>::Resolver); @@ -127,8 +132,8 @@ mod rkyv_impl { } } - impl, R: Serialize, S: Fallible + ?Sized, const D: usize> Serialize - for Isometry + impl, R: Serialize, S: Fallible + ?Sized, const D: usize> + Serialize for Isometry where T::Archived: Scalar, { @@ -140,7 +145,7 @@ mod rkyv_impl { } } - impl + impl Deserialize, _D> for Isometry where T::Archived: Scalar + Deserialize, @@ -155,9 +160,9 @@ mod rkyv_impl { } } -impl hash::Hash for Isometry +impl hash::Hash for Isometry where - InnerOwned>: hash::Hash, + Owned>: hash::Hash, { fn hash(&self, state: &mut H) { self.translation.hash(state); @@ -165,9 +170,12 @@ where } } -impl Copy for Isometry where InnerOwned>: Copy {} +impl Copy for Isometry where + Owned>: Copy +{ +} -impl Clone for Isometry { +impl Clone for Isometry { #[inline] fn clone(&self) -> Self { Self { @@ -630,7 +638,7 @@ where * Display * */ -impl fmt::Display for Isometry +impl fmt::Display for Isometry where R: fmt::Display, { diff --git a/src/geometry/isometry_construction.rs b/src/geometry/isometry_construction.rs index fe09b5cd..9b855599 100644 --- a/src/geometry/isometry_construction.rs +++ b/src/geometry/isometry_construction.rs @@ -1,5 +1,5 @@ #[cfg(feature = "arbitrary")] -use crate::base::storage::InnerOwned; +use crate::base::storage::Owned; #[cfg(feature = "arbitrary")] use quickcheck::{Arbitrary, Gen}; @@ -97,7 +97,7 @@ where T: SimdRealField + Arbitrary + Send, T::Element: SimdRealField, R: AbstractRotation + Arbitrary + Send, - InnerOwned>: Send, + Owned>: Send, { #[inline] fn arbitrary(rng: &mut Gen) -> Self { diff --git a/src/geometry/orthographic.rs b/src/geometry/orthographic.rs index 441ecd2d..b349a621 100644 --- a/src/geometry/orthographic.rs +++ b/src/geometry/orthographic.rs @@ -18,29 +18,27 @@ use crate::base::{Matrix4, Vector, Vector3}; use crate::geometry::{Point3, Projective3}; /// A 3D orthographic projection stored as a homogeneous 4x4 matrix. 
-#[repr(transparent)] +#[repr(C)] pub struct Orthographic3 { matrix: Matrix4, } -impl Copy for Orthographic3 {} +impl Copy for Orthographic3 {} -impl Clone for Orthographic3 { +impl Clone for Orthographic3 { #[inline] fn clone(&self) -> Self { - Self { - matrix: self.matrix.clone(), - } + Self::from_matrix_unchecked(self.matrix) } } -impl fmt::Debug for Orthographic3 { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { +impl fmt::Debug for Orthographic3 { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { self.matrix.fmt(f) } } -impl PartialEq for Orthographic3 { +impl PartialEq for Orthographic3 { #[inline] fn eq(&self, right: &Self) -> bool { self.matrix == right.matrix @@ -64,7 +62,7 @@ where } #[cfg(feature = "serde-serialize-no-std")] -impl Serialize for Orthographic3 { +impl Serialize for Orthographic3 { fn serialize(&self, serializer: S) -> Result where S: Serializer, @@ -74,7 +72,7 @@ impl Serialize for Orthographic3 { } #[cfg(feature = "serde-serialize-no-std")] -impl<'a, T: Deserialize<'a>> Deserialize<'a> for Orthographic3 { +impl<'a, T: RealField + Deserialize<'a>> Deserialize<'a> for Orthographic3 { fn deserialize(deserializer: Des) -> Result where Des: Deserializer<'a>, @@ -85,8 +83,31 @@ impl<'a, T: Deserialize<'a>> Deserialize<'a> for Orthographic3 { } } -/// # Basic methods and casts. impl Orthographic3 { + /// Wraps the given matrix to interpret it as a 3D orthographic matrix. + /// + /// It is not checked whether or not the given matrix actually represents an orthographic + /// projection. + /// + /// # Example + /// ``` + /// # use nalgebra::{Orthographic3, Point3, Matrix4}; + /// let mat = Matrix4::new( + /// 2.0 / 9.0, 0.0, 0.0, -11.0 / 9.0, + /// 0.0, 2.0 / 18.0, 0.0, -22.0 / 18.0, + /// 0.0, 0.0, -2.0 / 999.9, -1000.1 / 999.9, + /// 0.0, 0.0, 0.0, 1.0 + /// ); + /// let proj = Orthographic3::from_matrix_unchecked(mat); + /// assert_eq!(proj, Orthographic3::new(1.0, 10.0, 2.0, 20.0, 0.1, 1000.0)); + /// ``` + #[inline] + pub const fn from_matrix_unchecked(matrix: Matrix4) -> Self { + Self { matrix } + } +} + +impl Orthographic3 { /// Creates a new orthographic projection matrix. /// /// This follows the OpenGL convention, so this will flip the `z` axis. @@ -130,11 +151,8 @@ impl Orthographic3 { /// assert_relative_eq!(proj.project_point(&p8), Point3::new(-1.0, -1.0, -1.0)); /// ``` #[inline] - pub fn new(left: T, right: T, bottom: T, top: T, znear: T, zfar: T) -> Self - where - T: RealField, - { - let matrix = Matrix4::identity(); + pub fn new(left: T, right: T, bottom: T, top: T, znear: T, zfar: T) -> Self { + let matrix = Matrix4::::identity(); let mut res = Self::from_matrix_unchecked(matrix); res.set_left_and_right(left, right); @@ -146,10 +164,7 @@ impl Orthographic3 { /// Creates a new orthographic projection matrix from an aspect ratio and the vertical field of view. #[inline] - pub fn from_fov(aspect: T, vfov: T, znear: T, zfar: T) -> Self - where - T: RealField, - { + pub fn from_fov(aspect: T, vfov: T, znear: T, zfar: T) -> Self { assert!( znear != zfar, "The far plane must not be equal to the near plane." @@ -192,10 +207,7 @@ impl Orthographic3 { /// ``` #[inline] #[must_use] - pub fn inverse(&self) -> Matrix4 - where - T: RealField, - { + pub fn inverse(&self) -> Matrix4 { let mut res = self.to_homogeneous(); let inv_m11 = T::one() / self.matrix[(0, 0)]; @@ -229,7 +241,6 @@ impl Orthographic3 { /// ``` #[inline] #[must_use] - // TODO: rename into `into_homogeneous` to appease clippy. 
pub fn to_homogeneous(self) -> Matrix4 { self.matrix } @@ -265,8 +276,7 @@ impl Orthographic3 { #[inline] #[must_use] pub fn as_projective(&self) -> &Projective3 { - // Safety: Self and Projective3 are both #[repr(transparent)] of a matrix. - unsafe { &*(self as *const _ as *const _) } + unsafe { &*(self as *const Orthographic3 as *const Projective3) } } /// This transformation seen as a `Projective3`. @@ -279,7 +289,6 @@ impl Orthographic3 { /// ``` #[inline] #[must_use] - // TODO: rename into `into_projective` to appease clippy. pub fn to_projective(self) -> Projective3 { Projective3::from_matrix_unchecked(self.matrix) } @@ -311,10 +320,7 @@ impl Orthographic3 { pub fn unwrap(self) -> Matrix4 { self.matrix } -} -/// # Mathematical methods. -impl Orthographic3 { /// The left offset of the view cuboid. /// /// ``` diff --git a/src/geometry/perspective.rs b/src/geometry/perspective.rs index 5007b26b..d5a6fe42 100644 --- a/src/geometry/perspective.rs +++ b/src/geometry/perspective.rs @@ -34,7 +34,7 @@ impl Clone for Perspective3 { } impl fmt::Debug for Perspective3 { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { self.matrix.fmt(f) } } @@ -158,8 +158,7 @@ impl Perspective3 { #[inline] #[must_use] pub fn as_projective(&self) -> &Projective3 { - // Safety: Self and Projective3 are both #[repr(transparent)] of a matrix. - unsafe { &*(self as *const _ as *const _) } + unsafe { &*(self as *const Perspective3 as *const Projective3) } } /// This transformation seen as a `Projective3`. diff --git a/src/geometry/point.rs b/src/geometry/point.rs index d73c4f22..098b5c2a 100644 --- a/src/geometry/point.rs +++ b/src/geometry/point.rs @@ -5,7 +5,6 @@ use std::fmt; use std::hash; #[cfg(feature = "abomonation-serialize")] use std::io::{Result as IOResult, Write}; -use std::mem::{ManuallyDrop, MaybeUninit}; #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Deserializer, Serialize, Serializer}; @@ -15,13 +14,11 @@ use abomonation::Abomonation; use simba::simd::SimdPartialOrd; -use crate::allocator::InnerAllocator; use crate::base::allocator::Allocator; use crate::base::dimension::{DimName, DimNameAdd, DimNameSum, U1}; use crate::base::iter::{MatrixIter, MatrixIterMut}; -use crate::base::{Const, DefaultAllocator, OVector}; -use crate::storage::InnerOwned; -use crate::Scalar; +use crate::base::{Const, DefaultAllocator, OVector, Scalar}; +use std::mem::MaybeUninit; /// A point in an euclidean space. /// @@ -42,16 +39,17 @@ use crate::Scalar; /// achieved by multiplication, e.g., `isometry * point` or `rotation * point`. Some of these transformation /// may have some other methods, e.g., `isometry.inverse_transform_point(&point)`. See the documentation /// of said transformations for details. -#[repr(transparent)] -pub struct OPoint +#[repr(C)] +#[derive(Debug, Clone)] +pub struct OPoint where - DefaultAllocator: InnerAllocator, + DefaultAllocator: Allocator, { /// The coordinates of this point, i.e., the shift from the origin. 
pub coords: OVector, } -impl hash::Hash for OPoint +impl hash::Hash for OPoint where DefaultAllocator: Allocator, { @@ -60,37 +58,15 @@ where } } -impl Copy for OPoint +impl Copy for OPoint where DefaultAllocator: Allocator, OVector: Copy, { } -impl Clone for OPoint -where - DefaultAllocator: Allocator, - OVector: Clone, -{ - fn clone(&self) -> Self { - Self::from(self.coords.clone()) - } -} - -impl fmt::Debug for OPoint -where - DefaultAllocator: Allocator, - OVector: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("OPoint") - .field("coords", &self.coords) - .finish() - } -} - #[cfg(feature = "bytemuck")] -unsafe impl bytemuck::Zeroable for OPoint +unsafe impl bytemuck::Zeroable for OPoint where OVector: bytemuck::Zeroable, DefaultAllocator: Allocator, @@ -98,7 +74,7 @@ where } #[cfg(feature = "bytemuck")] -unsafe impl bytemuck::Pod for OPoint +unsafe impl bytemuck::Pod for OPoint where T: Copy, OVector: bytemuck::Pod, @@ -107,10 +83,10 @@ where } #[cfg(feature = "serde-serialize-no-std")] -impl Serialize for OPoint +impl Serialize for OPoint where DefaultAllocator: Allocator, - >::Buffer: Serialize, + >::Buffer: Serialize, { fn serialize(&self, serializer: S) -> Result where @@ -121,10 +97,10 @@ where } #[cfg(feature = "serde-serialize-no-std")] -impl<'a, T: Deserialize<'a>, D: DimName> Deserialize<'a> for OPoint +impl<'a, T: Scalar, D: DimName> Deserialize<'a> for OPoint where DefaultAllocator: Allocator, - >::Buffer: Deserialize<'a>, + >::Buffer: Deserialize<'a>, { fn deserialize(deserializer: Des) -> Result where @@ -139,6 +115,7 @@ where #[cfg(feature = "abomonation-serialize")] impl Abomonation for OPoint where + T: Scalar, OVector: Abomonation, DefaultAllocator: Allocator, { @@ -155,7 +132,7 @@ where } } -impl OPoint +impl OPoint where DefaultAllocator: Allocator, { @@ -173,9 +150,8 @@ where /// ``` #[inline] #[must_use] - pub fn map T2>(&self, f: F) -> OPoint + pub fn map T2>(&self, f: F) -> OPoint where - T: Clone, DefaultAllocator: Allocator, { self.coords.map(f).into() @@ -187,19 +163,16 @@ where /// ``` /// # use nalgebra::{Point2, Point3}; /// let mut p = Point2::new(1.0, 2.0); - /// p.apply(|e| e * 10.0); + /// p.apply(|e| *e = *e * 10.0); /// assert_eq!(p, Point2::new(10.0, 20.0)); /// /// // This works in any dimension. /// let mut p = Point3::new(1.0, 2.0, 3.0); - /// p.apply(|e| e * 10.0); + /// p.apply(|e| *e = *e * 10.0); /// assert_eq!(p, Point3::new(10.0, 20.0, 30.0)); /// ``` #[inline] - pub fn apply T>(&mut self, f: F) - where - T: Clone, - { + pub fn apply(&mut self, f: F) { self.coords.apply(f) } @@ -221,45 +194,25 @@ where #[inline] #[must_use] pub fn to_homogeneous(&self) -> OVector> - where - T: One + Clone, - D: DimNameAdd, - DefaultAllocator: Allocator>, - { - let mut res = OVector::<_, DimNameSum>::new_uninitialized(); - for i in 0..D::dim() { - unsafe { - *res.get_unchecked_mut(i) = MaybeUninit::new(self.coords[i].clone()); - } - } - - res[(D::dim(), 0)] = MaybeUninit::new(T::one()); - - unsafe { res.assume_init() } - } - - /// Converts this point into a vector in homogeneous coordinates, i.e., appends a `1` at the - /// end of it. Unlike [`to_homogeneous`], this method does not require `T: Clone`. - pub fn into_homogeneous(self) -> OVector> where T: One, D: DimNameAdd, DefaultAllocator: Allocator>, { - let mut res = OVector::<_, DimNameSum>::new_uninitialized(); - let mut md = self.manually_drop(); + // TODO: this is mostly a copy-past from Vector::push. 
+ // But we can’t use Vector::push because of the DimAdd bound + // (which we don’t use because we use DimNameAdd). + // We should find a way to re-use Vector::push. + let len = self.len(); + let mut res = crate::Matrix::uninit(DimNameSum::::name(), Const::<1>); + // This is basically a copy_from except that we warp the copied + // values into MaybeUninit. + res.generic_slice_mut((0, 0), self.coords.shape_generic()) + .zip_apply(&self.coords, |out, e| *out = MaybeUninit::new(e)); + res[(len, 0)] = MaybeUninit::new(T::one()); - for i in 0..D::dim() { - unsafe { - *res.get_unchecked_mut(i) = - MaybeUninit::new(ManuallyDrop::take(md.coords.get_unchecked_mut(i))); - } - } - - unsafe { - *res.get_unchecked_mut(D::dim()) = MaybeUninit::new(T::one()); - res.assume_init() - } + // Safety: res has been fully initialized. + unsafe { res.assume_init() } } /// Creates a new point with the given coordinates. @@ -322,7 +275,9 @@ where /// assert_eq!(it.next(), Some(3.0)); /// assert_eq!(it.next(), None); #[inline] - pub fn iter(&self) -> MatrixIter, InnerOwned> { + pub fn iter( + &self, + ) -> MatrixIter<'_, T, D, Const<1>, >::Buffer> { self.coords.iter() } @@ -346,7 +301,9 @@ where /// /// assert_eq!(p, Point3::new(10.0, 20.0, 30.0)); #[inline] - pub fn iter_mut(&mut self) -> MatrixIterMut, InnerOwned> { + pub fn iter_mut( + &mut self, + ) -> MatrixIterMut<'_, T, D, Const<1>, >::Buffer> { self.coords.iter_mut() } @@ -364,7 +321,7 @@ where } } -impl AbsDiffEq for OPoint +impl AbsDiffEq for OPoint where T::Epsilon: Copy, DefaultAllocator: Allocator, @@ -382,7 +339,7 @@ where } } -impl RelativeEq for OPoint +impl RelativeEq for OPoint where T::Epsilon: Copy, DefaultAllocator: Allocator, @@ -404,7 +361,7 @@ where } } -impl UlpsEq for OPoint +impl UlpsEq for OPoint where T::Epsilon: Copy, DefaultAllocator: Allocator, @@ -420,9 +377,9 @@ where } } -impl Eq for OPoint where DefaultAllocator: Allocator {} +impl Eq for OPoint where DefaultAllocator: Allocator {} -impl PartialEq for OPoint +impl PartialEq for OPoint where DefaultAllocator: Allocator, { @@ -432,7 +389,7 @@ where } } -impl PartialOrd for OPoint +impl PartialOrd for OPoint where DefaultAllocator: Allocator, { @@ -497,7 +454,7 @@ where * Display * */ -impl fmt::Display for OPoint +impl fmt::Display for OPoint where DefaultAllocator: Allocator, { diff --git a/src/geometry/point_construction.rs b/src/geometry/point_construction.rs index 94876c18..d2393146 100644 --- a/src/geometry/point_construction.rs +++ b/src/geometry/point_construction.rs @@ -1,5 +1,3 @@ -use std::mem::{ManuallyDrop, MaybeUninit}; - #[cfg(feature = "arbitrary")] use quickcheck::{Arbitrary, Gen}; @@ -22,23 +20,10 @@ use simba::scalar::{ClosedDiv, SupersetOf}; use crate::geometry::Point; /// # Other construction methods -impl OPoint +impl OPoint where DefaultAllocator: Allocator, { - /// Creates a new point with uninitialized coordinates. - #[inline] - pub fn new_uninitialized() -> OPoint, D> { - OPoint::from(OVector::new_uninitialized_generic(D::name(), Const::<1>)) - } - - /// Converts `self` into a point whose coordinates must be manually dropped. - /// This should be zero-cost. - #[inline] - pub fn manually_drop(self) -> OPoint, D> { - OPoint::from(self.coords.manually_drop()) - } - /// Creates a new point with all coordinates equal to zero. /// /// # Example @@ -57,9 +42,9 @@ where #[inline] pub fn origin() -> Self where - T: Zero + Clone, + T: Zero, { - Self::from(OVector::<_, D>::zeros()) + Self::from(OVector::from_element(T::zero())) } /// Creates a new point from a slice. 
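The construction cleanups above rest on the same allocation discipline as the `to_homogeneous` rewrite earlier in this diff: allocate uninitialized storage, write every element exactly once through `MaybeUninit`, then call `assume_init`. The sketch below replays that three-step pattern on a plain array using only `std`; `push_one` is a hypothetical helper mirroring what `Matrix::uninit` plus `assume_init` do, not part of nalgebra:

```
use std::mem::MaybeUninit;

// Appends 1.0 to a 3-vector without zero-initializing the output first,
// the same uninit/write/assume_init pattern used by `to_homogeneous` above.
fn push_one(v: [f64; 3]) -> [f64; 4] {
    // 1. Allocate uninitialized storage (the `Matrix::uninit` step).
    let mut out: [MaybeUninit<f64>; 4] = [MaybeUninit::uninit(); 4];

    // 2. Initialize every element exactly once.
    for (o, x) in out.iter_mut().zip(v) {
        *o = MaybeUninit::new(x);
    }
    out[3] = MaybeUninit::new(1.0);

    // 3. Safety: all four elements were written above, so this transmute
    //    (the `assume_init` step) only observes initialized data.
    unsafe { std::mem::transmute::<[MaybeUninit<f64>; 4], [f64; 4]>(out) }
}

fn main() {
    assert_eq!(push_one([1.0, 2.0, 3.0]), [1.0, 2.0, 3.0, 1.0]);
}
```

The `// Safety:` comments this patch scatters through `statistics.rs` and `point.rs` are exactly the justification for step 3: every path between `uninit` and `assume_init` must provably initialize the whole buffer.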
@@ -77,11 +62,8 @@ where /// assert_eq!(pt, Point3::new(1.0, 2.0, 3.0)); /// ``` #[inline] - pub fn from_slice(components: &[T]) -> Self - where - T: Clone, - { - Self::from(OVector::<_, D>::from_row_slice(components)) + pub fn from_slice(components: &[T]) -> Self { + Self::from(OVector::from_row_slice(components)) } /// Creates a new point from its homogeneous vector representation. @@ -139,7 +121,7 @@ where /// let pt2 = pt.cast::(); /// assert_eq!(pt2, Point2::new(1.0f32, 2.0)); /// ``` - pub fn cast(self) -> OPoint + pub fn cast(self) -> OPoint where OPoint: SupersetOf, DefaultAllocator: Allocator, @@ -169,7 +151,7 @@ where } #[cfg(feature = "rand-no-std")] -impl Distribution> for Standard +impl Distribution> for Standard where Standard: Distribution, DefaultAllocator: Allocator, @@ -182,10 +164,10 @@ where } #[cfg(feature = "arbitrary")] -impl Arbitrary for OPoint +impl Arbitrary for OPoint where + >::Buffer: Send, DefaultAllocator: Allocator, - crate::base::storage::InnerOwned: Clone + Send, { #[inline] fn arbitrary(g: &mut Gen) -> Self { @@ -201,7 +183,7 @@ where // NOTE: the impl for Point1 is not with the others so that we // can add a section with the impl block comment. /// # Construction from individual components -impl Point1 { +impl Point1 { /// Initializes this point from its components. /// /// # Example @@ -220,7 +202,7 @@ impl Point1 { } macro_rules! componentwise_constructors_impl( ($($doc: expr; $Point: ident, $Vector: ident, $($args: ident:$irow: expr),*);* $(;)*) => {$( - impl $Point { + impl $Point { #[doc = "Initializes this point from its components."] #[doc = "# Example\n```"] #[doc = $doc] diff --git a/src/geometry/point_conversion.rs b/src/geometry/point_conversion.rs index b564f0ad..f35a9fc6 100644 --- a/src/geometry/point_conversion.rs +++ b/src/geometry/point_conversion.rs @@ -2,7 +2,7 @@ use num::{One, Zero}; use simba::scalar::{ClosedDiv, SubsetOf, SupersetOf}; use simba::simd::PrimitiveSimdValue; -use crate::base::allocator::{Allocator, InnerAllocator}; +use crate::base::allocator::Allocator; use crate::base::dimension::{DimNameAdd, DimNameSum, U1}; use crate::base::{Const, DefaultAllocator, Matrix, OVector, Scalar}; @@ -19,7 +19,8 @@ use crate::{DimName, OPoint}; impl SubsetOf> for OPoint where - T2: SupersetOf, + T1: Scalar, + T2: Scalar + SupersetOf, DefaultAllocator: Allocator + Allocator, { #[inline] @@ -43,6 +44,7 @@ where impl SubsetOf>> for OPoint where D: DimNameAdd, + T1: Scalar, T2: Scalar + Zero + One + ClosedDiv + SupersetOf, DefaultAllocator: Allocator + Allocator @@ -54,7 +56,7 @@ where #[inline] fn to_superset(&self) -> OVector> { let p: OPoint = self.to_superset(); - p.into_homogeneous() + p.to_homogeneous() } #[inline] @@ -64,25 +66,25 @@ where #[inline] fn from_superset_unchecked(v: &OVector>) -> Self { - let coords = v.generic_slice((0, 0), (D::name(), Const::<1>)) / v[D::dim()].clone(); + let coords = v.generic_slice((0, 0), (D::name(), Const::<1>)) / v[D::dim()].inlined_clone(); Self { coords: crate::convert_unchecked(coords), } } } -impl From> for OVector> +impl From> for OVector> where D: DimNameAdd, DefaultAllocator: Allocator> + Allocator, { #[inline] fn from(t: OPoint) -> Self { - t.into_homogeneous() + t.to_homogeneous() } } -impl From<[T; D]> for Point { +impl From<[T; D]> for Point { #[inline] fn from(coords: [T; D]) -> Self { Point { @@ -91,19 +93,16 @@ impl From<[T; D]> for Point { } } -impl From> for [T; D] -where - T: Clone, -{ +impl From> for [T; D] { #[inline] fn from(p: Point) -> Self { p.coords.into() } } -impl From> 
for OPoint +impl From> for OPoint where - DefaultAllocator: InnerAllocator, + DefaultAllocator: Allocator, { #[inline] fn from(coords: OVector) -> Self { @@ -111,81 +110,85 @@ where } } -impl From<[Point; 2]> for Point +impl From<[Point; 2]> + for Point where T: From<[::Element; 2]>, - T::Element: Scalar, + T::Element: Scalar + Copy, + >>::Buffer: Copy, { #[inline] fn from(arr: [Point; 2]) -> Self { - Self::from(OVector::from([ - arr[0].coords.clone(), - arr[1].coords.clone(), - ])) + Self::from(OVector::from([arr[0].coords, arr[1].coords])) } } -impl From<[Point; 4]> for Point +impl From<[Point; 4]> + for Point where T: From<[::Element; 4]>, - T::Element: Scalar, + T::Element: Scalar + Copy, + >>::Buffer: Copy, { #[inline] fn from(arr: [Point; 4]) -> Self { Self::from(OVector::from([ - arr[0].coords.clone(), - arr[1].coords.clone(), - arr[2].coords.clone(), - arr[3].coords.clone(), + arr[0].coords, + arr[1].coords, + arr[2].coords, + arr[3].coords, ])) } } -impl From<[Point; 8]> for Point +impl From<[Point; 8]> + for Point where T: From<[::Element; 8]>, - T::Element: Scalar, + T::Element: Scalar + Copy, + >>::Buffer: Copy, { #[inline] fn from(arr: [Point; 8]) -> Self { Self::from(OVector::from([ - arr[0].coords.clone(), - arr[1].coords.clone(), - arr[2].coords.clone(), - arr[3].coords.clone(), - arr[4].coords.clone(), - arr[5].coords.clone(), - arr[6].coords.clone(), - arr[7].coords.clone(), + arr[0].coords, + arr[1].coords, + arr[2].coords, + arr[3].coords, + arr[4].coords, + arr[5].coords, + arr[6].coords, + arr[7].coords, ])) } } -impl From<[Point; 16]> +impl From<[Point; 16]> for Point where T: From<[::Element; 16]>, - T::Element: Scalar, + T::Element: Scalar + Copy, + >>::Buffer: Copy, { #[inline] fn from(arr: [Point; 16]) -> Self { Self::from(OVector::from([ - arr[0].coords.clone(), - arr[1].coords.clone(), - arr[2].coords.clone(), - arr[3].coords.clone(), - arr[4].coords.clone(), - arr[5].coords.clone(), - arr[6].coords.clone(), - arr[7].coords.clone(), - arr[8].coords.clone(), - arr[9].coords.clone(), - arr[10].coords.clone(), - arr[11].coords.clone(), - arr[12].coords.clone(), - arr[13].coords.clone(), - arr[14].coords.clone(), - arr[15].coords.clone(), + arr[0].coords, + arr[1].coords, + arr[2].coords, + arr[3].coords, + arr[4].coords, + arr[5].coords, + arr[6].coords, + arr[7].coords, + arr[8].coords, + arr[9].coords, + arr[10].coords, + arr[11].coords, + arr[12].coords, + arr[13].coords, + arr[14].coords, + arr[15].coords, ])) } } diff --git a/src/geometry/point_coordinates.rs b/src/geometry/point_coordinates.rs index b9bd69a3..984a2fae 100644 --- a/src/geometry/point_coordinates.rs +++ b/src/geometry/point_coordinates.rs @@ -1,7 +1,7 @@ use std::ops::{Deref, DerefMut}; use crate::base::coordinates::{X, XY, XYZ, XYZW, XYZWA, XYZWAB}; -use crate::base::{U1, U2, U3, U4, U5, U6}; +use crate::base::{Scalar, U1, U2, U3, U4, U5, U6}; use crate::geometry::OPoint; @@ -13,7 +13,7 @@ use crate::geometry::OPoint; macro_rules! deref_impl( ($D: ty, $Target: ident $(, $comps: ident)*) => { - impl Deref for OPoint + impl Deref for OPoint { type Target = $Target; @@ -23,7 +23,7 @@ macro_rules! deref_impl( } } - impl DerefMut for OPoint + impl DerefMut for OPoint { #[inline] fn deref_mut(&mut self) -> &mut Self::Target { diff --git a/src/geometry/point_ops.rs b/src/geometry/point_ops.rs index 72d91ff3..5b019a9d 100644 --- a/src/geometry/point_ops.rs +++ b/src/geometry/point_ops.rs @@ -21,7 +21,7 @@ use crate::DefaultAllocator; * Indexing. 
* */ -impl Index for OPoint +impl Index for OPoint where DefaultAllocator: Allocator, { @@ -33,7 +33,7 @@ where } } -impl IndexMut for OPoint +impl IndexMut for OPoint where DefaultAllocator: Allocator, { diff --git a/src/geometry/point_simba.rs b/src/geometry/point_simba.rs index aa630adf..ad7433af 100644 --- a/src/geometry/point_simba.rs +++ b/src/geometry/point_simba.rs @@ -1,8 +1,8 @@ use simba::simd::SimdValue; -use crate::base::OVector; +use crate::base::{OVector, Scalar}; + use crate::geometry::Point; -use crate::Scalar; impl SimdValue for Point where diff --git a/src/geometry/quaternion.rs b/src/geometry/quaternion.rs index 26bb8d97..cd248c94 100755 --- a/src/geometry/quaternion.rs +++ b/src/geometry/quaternion.rs @@ -6,7 +6,7 @@ use std::hash::{Hash, Hasher}; use std::io::{Result as IOResult, Write}; #[cfg(feature = "serde-serialize-no-std")] -use crate::base::storage::InnerOwned; +use crate::base::storage::Owned; #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Deserializer, Serialize, Serializer}; @@ -26,29 +26,29 @@ use crate::geometry::{Point3, Rotation}; /// A quaternion. See the type alias `UnitQuaternion = Unit` for a quaternion /// that may be used as a rotation. -#[repr(transparent)] +#[repr(C)] #[derive(Debug, Copy, Clone)] pub struct Quaternion { /// This quaternion as a 4D vector of coordinates in the `[ x, y, z, w ]` storage order. pub coords: Vector4, } -impl Hash for Quaternion { +impl Hash for Quaternion { fn hash(&self, state: &mut H) { self.coords.hash(state) } } -impl Eq for Quaternion {} +impl Eq for Quaternion {} -impl PartialEq for Quaternion { +impl PartialEq for Quaternion { #[inline] fn eq(&self, right: &Self) -> bool { self.coords == right.coords } } -impl Default for Quaternion { +impl Default for Quaternion { fn default() -> Self { Quaternion { coords: Vector4::zeros(), @@ -57,10 +57,10 @@ impl Default for Quaternion { } #[cfg(feature = "bytemuck")] -unsafe impl bytemuck::Zeroable for Quaternion where Vector4: bytemuck::Zeroable {} +unsafe impl bytemuck::Zeroable for Quaternion where Vector4: bytemuck::Zeroable {} #[cfg(feature = "bytemuck")] -unsafe impl bytemuck::Pod for Quaternion +unsafe impl bytemuck::Pod for Quaternion where Vector4: bytemuck::Pod, T: Copy, @@ -68,7 +68,7 @@ where } #[cfg(feature = "abomonation-serialize")] -impl Abomonation for Quaternion +impl Abomonation for Quaternion where Vector4: Abomonation, { @@ -86,7 +86,7 @@ where } #[cfg(feature = "serde-serialize-no-std")] -impl Serialize for Quaternion +impl Serialize for Quaternion where Owned: Serialize, { @@ -99,7 +99,7 @@ where } #[cfg(feature = "serde-serialize-no-std")] -impl<'a, T> Deserialize<'a> for Quaternion +impl<'a, T: Scalar> Deserialize<'a> for Quaternion where Owned: Deserialize<'a>, { @@ -1045,8 +1045,8 @@ impl> UlpsEq for Quaternion { } } -impl fmt::Display for Quaternion { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { +impl fmt::Display for Quaternion { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, "Quaternion {} − ({}, {}, {})", @@ -1097,7 +1097,7 @@ impl UnitQuaternion where T::Element: SimdRealField, { - /// The rotation angle in \[0; pi\] of this unit quaternion. + /// The rotation angle in [0; pi] of this unit quaternion. 
/// /// # Example /// ``` diff --git a/src/geometry/quaternion_construction.rs b/src/geometry/quaternion_construction.rs index 451d5d55..61b1fe3e 100644 --- a/src/geometry/quaternion_construction.rs +++ b/src/geometry/quaternion_construction.rs @@ -1,7 +1,7 @@ #[cfg(feature = "arbitrary")] use crate::base::dimension::U4; #[cfg(feature = "arbitrary")] -use crate::base::storage::InnerOwned; +use crate::base::storage::Owned; #[cfg(feature = "arbitrary")] use quickcheck::{Arbitrary, Gen}; @@ -179,7 +179,7 @@ where #[cfg(feature = "arbitrary")] impl Arbitrary for Quaternion where - InnerOwned: Send, + Owned: Send, { #[inline] fn arbitrary(g: &mut Gen) -> Self { @@ -881,8 +881,8 @@ where #[cfg(feature = "arbitrary")] impl Arbitrary for UnitQuaternion where - InnerOwned: Send, - InnerOwned: Send, + Owned: Send, + Owned: Send, { #[inline] fn arbitrary(g: &mut Gen) -> Self { diff --git a/src/geometry/quaternion_conversion.rs b/src/geometry/quaternion_conversion.rs index d12797d2..6dfbfbc6 100644 --- a/src/geometry/quaternion_conversion.rs +++ b/src/geometry/quaternion_conversion.rs @@ -28,7 +28,8 @@ use crate::geometry::{ impl SubsetOf> for Quaternion where - T2: SupersetOf, + T1: Scalar, + T2: Scalar + SupersetOf, { #[inline] fn to_superset(&self) -> Quaternion { @@ -50,7 +51,8 @@ where impl SubsetOf> for UnitQuaternion where - T2: SupersetOf, + T1: Scalar, + T2: Scalar + SupersetOf, { #[inline] fn to_superset(&self) -> UnitQuaternion { @@ -237,14 +239,14 @@ where } } -impl From> for Quaternion { +impl From> for Quaternion { #[inline] fn from(coords: Vector4) -> Self { Self { coords } } } -impl From<[T; 4]> for Quaternion { +impl From<[T; 4]> for Quaternion { #[inline] fn from(coords: [T; 4]) -> Self { Self { diff --git a/src/geometry/quaternion_coordinates.rs b/src/geometry/quaternion_coordinates.rs index 40d8ca84..cb16e59e 100644 --- a/src/geometry/quaternion_coordinates.rs +++ b/src/geometry/quaternion_coordinates.rs @@ -12,14 +12,13 @@ impl Deref for Quaternion { #[inline] fn deref(&self) -> &Self::Target { - // Safety: Self and IJKW are both stored as contiguous coordinates. 
- unsafe { &*(self as *const _ as *const _) } + unsafe { &*(self as *const Self as *const Self::Target) } } } impl DerefMut for Quaternion { #[inline] fn deref_mut(&mut self) -> &mut Self::Target { - unsafe { &mut *(self as *mut _ as *mut _) } + unsafe { &mut *(self as *mut Self as *mut Self::Target) } } } diff --git a/src/geometry/quaternion_ops.rs b/src/geometry/quaternion_ops.rs index 12c371c2..eb7a15cd 100644 --- a/src/geometry/quaternion_ops.rs +++ b/src/geometry/quaternion_ops.rs @@ -59,12 +59,12 @@ use std::ops::{ use crate::base::dimension::U3; use crate::base::storage::Storage; -use crate::base::{Const, Unit, Vector, Vector3}; +use crate::base::{Const, Scalar, Unit, Vector, Vector3}; use crate::SimdRealField; use crate::geometry::{Point3, Quaternion, Rotation, UnitQuaternion}; -impl Index for Quaternion { +impl Index for Quaternion { type Output = T; #[inline] @@ -73,7 +73,7 @@ impl Index for Quaternion { } } -impl IndexMut for Quaternion { +impl IndexMut for Quaternion { #[inline] fn index_mut(&mut self, i: usize) -> &mut T { &mut self.coords[i] @@ -371,12 +371,12 @@ quaternion_op_impl!( ; self: Rotation, rhs: UnitQuaternion, Output = UnitQuaternion; - UnitQuaternion::::from_rotation_matrix(&self) / rhs;); + UnitQuaternion::::from_rotation_matrix(&self) / rhs; ); // UnitQuaternion × Vector quaternion_op_impl!( Mul, mul; - SB: Storage>; + SB: Storage> ; self: &'a UnitQuaternion, rhs: &'b Vector, SB>, Output = Vector3; { diff --git a/src/geometry/reflection.rs b/src/geometry/reflection.rs index cc12594a..a48b8024 100644 --- a/src/geometry/reflection.rs +++ b/src/geometry/reflection.rs @@ -1,5 +1,3 @@ -use std::mem::MaybeUninit; - use crate::base::constraint::{AreMultipliable, DimEq, SameNumberOfRows, ShapeConstraint}; use crate::base::{Const, Matrix, Unit, Vector}; use crate::dimension::{Dim, U1}; @@ -9,7 +7,7 @@ use simba::scalar::ComplexField; use crate::geometry::Point; /// A reflection wrt. a plane. -pub struct Reflection { +pub struct Reflection { axis: Vector, bias: T, } @@ -88,40 +86,40 @@ impl> Reflection { pub fn reflect_rows( &self, lhs: &mut Matrix, - work: &mut Vector, R2, S3>, + work: &mut Vector, ) where S2: StorageMut, - S3: StorageMut, R2>, + S3: StorageMut, ShapeConstraint: DimEq + AreMultipliable, { - let mut work = lhs.mul_to(&self.axis, work); + lhs.mul_to(&self.axis, work); if !self.bias.is_zero() { work.add_scalar_mut(-self.bias); } let m_two: T = crate::convert(-2.0f64); - lhs.gerc(m_two, &work, &self.axis, T::one()); + lhs.gerc(m_two, work, &self.axis, T::one()); } /// Applies the reflection to the rows of `lhs`. 
pub fn reflect_rows_with_sign( &self, lhs: &mut Matrix, - work: &mut Vector, R2, S3>, + work: &mut Vector, sign: T, ) where S2: StorageMut, - S3: StorageMut, R2>, + S3: StorageMut, ShapeConstraint: DimEq + AreMultipliable, { - let mut work = lhs.mul_to(&self.axis, work); + lhs.mul_to(&self.axis, work); if !self.bias.is_zero() { work.add_scalar_mut(-self.bias); } let m_two = sign.scale(crate::convert(-2.0f64)); - lhs.gerc(m_two, &work, &self.axis, sign); + lhs.gerc(m_two, work, &self.axis, sign); } } diff --git a/src/geometry/rotation.rs b/src/geometry/rotation.rs index 7cde243a..33e42dda 100755 --- a/src/geometry/rotation.rs +++ b/src/geometry/rotation.rs @@ -9,8 +9,7 @@ use std::io::{Result as IOResult, Write}; use serde::{Deserialize, Deserializer, Serialize, Serializer}; #[cfg(feature = "serde-serialize-no-std")] -use crate::base::storage::InnerOwned; -use crate::storage::InnerOwned; +use crate::base::storage::Owned; #[cfg(feature = "abomonation-serialize")] use abomonation::Abomonation; @@ -54,26 +53,29 @@ use crate::geometry::Point; /// # Conversion /// * [Conversion to a matrix `matrix`, `to_homogeneous`…](#conversion-to-a-matrix) /// -#[repr(transparent)] +#[repr(C)] #[derive(Debug)] pub struct Rotation { matrix: SMatrix, } -impl hash::Hash for Rotation +impl hash::Hash for Rotation where - InnerOwned, Const>: hash::Hash, + , Const>>::Buffer: hash::Hash, { fn hash(&self, state: &mut H) { self.matrix.hash(state) } } -impl Copy for Rotation where InnerOwned, Const>: Copy {} +impl Copy for Rotation where + , Const>>::Buffer: Copy +{ +} -impl Clone for Rotation +impl Clone for Rotation where - InnerOwned, Const>: Clone, + , Const>>::Buffer: Clone, { #[inline] fn clone(&self) -> Self { @@ -100,6 +102,7 @@ where #[cfg(feature = "abomonation-serialize")] impl Abomonation for Rotation where + T: Scalar, SMatrix: Abomonation, { unsafe fn entomb(&self, writer: &mut W) -> IOResult<()> { @@ -118,7 +121,7 @@ where #[cfg(feature = "serde-serialize-no-std")] impl Serialize for Rotation where - InnerOwned, Const>: Serialize, + Owned, Const>: Serialize, { fn serialize(&self, serializer: S) -> Result where @@ -129,9 +132,9 @@ where } #[cfg(feature = "serde-serialize-no-std")] -impl<'a, T, const D: usize> Deserialize<'a> for Rotation +impl<'a, T: Scalar, const D: usize> Deserialize<'a> for Rotation where - InnerOwned, Const>: Deserialize<'a>, + Owned, Const>: Deserialize<'a>, { fn deserialize(deserializer: Des) -> Result where @@ -173,7 +176,7 @@ impl Rotation { } /// # Conversion to a matrix -impl Rotation { +impl Rotation { /// A reference to the underlying matrix representation of this rotation. /// /// # Example @@ -201,7 +204,7 @@ impl Rotation { /// A mutable reference to the underlying matrix representation of this rotation. 
#[inline] #[deprecated(note = "Use `.matrix_mut_unchecked()` instead.")] - pub fn matrix_mut(&mut self) -> &mut SMatrix { + pub unsafe fn matrix_mut(&mut self) -> &mut SMatrix { &mut self.matrix } @@ -274,7 +277,7 @@ impl Rotation { #[must_use] pub fn to_homogeneous(&self) -> OMatrix, U1>, DimNameSum, U1>> where - T: Zero + One + Scalar, + T: Zero + One, Const: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, { diff --git a/src/geometry/rotation_specialization.rs b/src/geometry/rotation_specialization.rs index 397f5bf6..5cd44119 100644 --- a/src/geometry/rotation_specialization.rs +++ b/src/geometry/rotation_specialization.rs @@ -1,5 +1,5 @@ #[cfg(feature = "arbitrary")] -use crate::base::storage::InnerOwned; +use crate::base::storage::Owned; #[cfg(feature = "arbitrary")] use quickcheck::{Arbitrary, Gen}; @@ -284,7 +284,7 @@ where impl Arbitrary for Rotation2 where T::Element: SimdRealField, - InnerOwned: Send, + Owned: Send, { #[inline] fn arbitrary(g: &mut Gen) -> Self { @@ -976,8 +976,8 @@ where impl Arbitrary for Rotation3 where T::Element: SimdRealField, - InnerOwned: Send, - InnerOwned: Send, + Owned: Send, + Owned: Send, { #[inline] fn arbitrary(g: &mut Gen) -> Self { diff --git a/src/geometry/similarity.rs b/src/geometry/similarity.rs index 506c0896..32a19772 100755 --- a/src/geometry/similarity.rs +++ b/src/geometry/similarity.rs @@ -17,11 +17,12 @@ use simba::simd::SimdRealField; use crate::base::allocator::Allocator; use crate::base::dimension::{DimNameAdd, DimNameSum, U1}; -use crate::base::storage::InnerOwned; +use crate::base::storage::Owned; use crate::base::{Const, DefaultAllocator, OMatrix, SVector, Scalar}; use crate::geometry::{AbstractRotation, Isometry, Point, Translation}; /// A similarity, i.e., an uniform scaling, followed by a rotation, followed by a translation. 
+#[repr(C)] #[derive(Debug)] #[cfg_attr(feature = "serde-serialize-no-std", derive(Serialize, Deserialize))] #[cfg_attr( @@ -64,7 +65,7 @@ where impl hash::Hash for Similarity where - InnerOwned>: hash::Hash, + Owned>: hash::Hash, { fn hash(&self, state: &mut H) { self.isometry.hash(state); @@ -75,7 +76,7 @@ where impl + Copy, const D: usize> Copy for Similarity where - InnerOwned>: Copy, + Owned>: Copy, { } diff --git a/src/geometry/similarity_construction.rs b/src/geometry/similarity_construction.rs index 1e2a29a0..feb5719b 100644 --- a/src/geometry/similarity_construction.rs +++ b/src/geometry/similarity_construction.rs @@ -1,5 +1,5 @@ #[cfg(feature = "arbitrary")] -use crate::base::storage::InnerOwned; +use crate::base::storage::Owned; #[cfg(feature = "arbitrary")] use quickcheck::{Arbitrary, Gen}; @@ -109,7 +109,7 @@ where T: crate::RealField + Arbitrary + Send, T::Element: crate::RealField, R: AbstractRotation + Arbitrary + Send, - InnerOwned>: Send, + Owned>: Send, { #[inline] fn arbitrary(rng: &mut Gen) -> Self { diff --git a/src/geometry/transform.rs b/src/geometry/transform.rs index a39ed75c..71544b59 100755 --- a/src/geometry/transform.rs +++ b/src/geometry/transform.rs @@ -1,6 +1,5 @@ use approx::{AbsDiffEq, RelativeEq, UlpsEq}; use std::any::Any; -use std::fmt; use std::fmt::Debug; use std::hash; use std::marker::PhantomData; @@ -8,11 +7,11 @@ use std::marker::PhantomData; #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Deserializer, Serialize, Serializer}; -use simba::scalar::{ComplexField, RealField}; +use simba::scalar::RealField; use crate::base::allocator::Allocator; use crate::base::dimension::{DimNameAdd, DimNameSum, U1}; -use crate::base::storage::InnerOwned; +use crate::base::storage::Owned; use crate::base::{Const, DefaultAllocator, DimName, OMatrix, SVector}; use crate::geometry::Point; @@ -120,7 +119,7 @@ macro_rules! category_mul_impl( )*} ); -// We require stability uppon multiplication. +// We require stability upon multiplication. impl TCategoryMul for T { type Representative = T; } @@ -157,8 +156,9 @@ super_tcategory_impl!( /// /// It is stored as a matrix with dimensions `(D + 1, D + 1)`, e.g., it stores a 4x4 matrix for a /// 3D transformation.
-#[repr(transparent)] -pub struct Transform +#[repr(C)] +#[derive(Debug)] +pub struct Transform where Const: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, @@ -167,32 +167,29 @@ where _phantom: PhantomData, } -impl hash::Hash for Transform +impl hash::Hash for Transform where Const: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, - InnerOwned, U1>, DimNameSum, U1>>: hash::Hash, + Owned, U1>, DimNameSum, U1>>: hash::Hash, { fn hash(&self, state: &mut H) { self.matrix.hash(state); } } -/* -impl Copy for Transform +impl Copy for Transform where Const: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, - InnerOwned, U1>, DimNameSum, U1>>: Copy, + Owned, U1>, DimNameSum, U1>>: Copy, { } -*/ -impl Clone for Transform +impl Clone for Transform where Const: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, - InnerOwned, U1>, DimNameSum, U1>>: Clone, { #[inline] fn clone(&self) -> Self { @@ -200,25 +197,33 @@ where } } -impl Debug for Transform +#[cfg(feature = "bytemuck")] +unsafe impl bytemuck::Zeroable for Transform where + T: RealField + bytemuck::Zeroable, Const: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, - InnerOwned, U1>, DimNameSum, U1>>: Debug, + OMatrix, U1>, DimNameSum, U1>>: bytemuck::Zeroable, +{ +} + +#[cfg(feature = "bytemuck")] +unsafe impl bytemuck::Pod for Transform +where + T: RealField + bytemuck::Pod, + Const: DimNameAdd, + DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, + OMatrix, U1>, DimNameSum, U1>>: bytemuck::Pod, + Owned, U1>, DimNameSum, U1>>: Copy, { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("Transform") - .field("matrix", &self.matrix) - .finish() - } } #[cfg(feature = "serde-serialize-no-std")] -impl Serialize for Transform +impl Serialize for Transform where Const: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, - InnerOwned, U1>, DimNameSum, U1>>: Serialize, + Owned, U1>, DimNameSum, U1>>: Serialize, { fn serialize(&self, serializer: S) -> Result where @@ -229,11 +234,11 @@ where } #[cfg(feature = "serde-serialize-no-std")] -impl<'a, T, C: TCategory, const D: usize> Deserialize<'a> for Transform +impl<'a, T: RealField, C: TCategory, const D: usize> Deserialize<'a> for Transform where Const: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, - InnerOwned, U1>, DimNameSum, U1>>: Deserialize<'a>, + Owned, U1>, DimNameSum, U1>>: Deserialize<'a>, { fn deserialize(deserializer: Des) -> Result where @@ -247,14 +252,14 @@ where } } -impl Eq for Transform +impl Eq for Transform where Const: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, { } -impl PartialEq for Transform +impl PartialEq for Transform where Const: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, @@ -265,7 +270,7 @@ where } } -impl Transform +impl Transform where Const: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, @@ -370,10 +375,7 @@ where #[deprecated( note = "This method is redundant with automatic `Copy` and the `.clone()` method and will be removed in a future release." 
)] - pub fn clone_owned(&self) -> Transform - where - T: Clone, - { + pub fn clone_owned(&self) -> Transform { Transform::from_matrix_unchecked(self.matrix.clone_owned()) } @@ -391,10 +393,7 @@ where /// ``` #[inline] #[must_use] - pub fn to_homogeneous(&self) -> OMatrix, U1>, DimNameSum, U1>> - where - T: Clone, - { + pub fn to_homogeneous(&self) -> OMatrix, U1>, DimNameSum, U1>> { self.matrix().clone_owned() } @@ -423,10 +422,7 @@ where /// ``` #[inline] #[must_use = "Did you mean to use try_inverse_mut()?"] - pub fn try_inverse(self) -> Option> - where - T: ComplexField, - { + pub fn try_inverse(self) -> Option> { self.matrix .try_inverse() .map(Transform::from_matrix_unchecked) @@ -452,7 +448,6 @@ where #[must_use = "Did you mean to use inverse_mut()?"] pub fn inverse(self) -> Transform where - T: ComplexField, C: SubTCategoryOf, { // TODO: specialize for TAffine? @@ -484,10 +479,7 @@ where /// assert!(!t.try_inverse_mut()); /// ``` #[inline] - pub fn try_inverse_mut(&mut self) -> bool - where - T: ComplexField, - { + pub fn try_inverse_mut(&mut self) -> bool { self.matrix.try_inverse_mut() } @@ -511,7 +503,6 @@ where #[inline] pub fn inverse_mut(&mut self) where - T: ComplexField, C: SubTCategoryOf, { let _ = self.matrix.try_inverse_mut(); @@ -552,8 +543,8 @@ where Const: DimNameAdd, C: SubTCategoryOf, DefaultAllocator: Allocator, U1>, DimNameSum, U1>> - + Allocator, U1>>, - InnerOwned, U1>, DimNameSum, U1>>: Clone, + + Allocator, U1>>, // + Allocator + // + Allocator { /// Transform the given point by the inverse of this transformation. /// This may be cheaper than inverting the transformation and transforming diff --git a/src/geometry/transform_ops.rs b/src/geometry/transform_ops.rs index 8b4be18f..94ef4ab3 100644 --- a/src/geometry/transform_ops.rs +++ b/src/geometry/transform_ops.rs @@ -9,7 +9,6 @@ use simba::scalar::{ClosedAdd, ClosedMul, RealField, SubsetOf}; use crate::base::allocator::Allocator; use crate::base::dimension::{DimNameAdd, DimNameSum, U1}; use crate::base::{Const, DefaultAllocator, OMatrix, SVector, Scalar}; -use crate::storage::InnerOwned; use crate::geometry::{ Isometry, Point, Rotation, Similarity, SubTCategoryOf, SuperTCategoryOf, TAffine, TCategory, @@ -373,8 +372,7 @@ md_impl_all!( const D; for CA, CB; where Const: DimNameAdd, CA: TCategoryMul, CB: SubTCategoryOf, - DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, - Transform: Clone; // There's probably a better bound here. 
+ DefaultAllocator: Allocator, U1>, DimNameSum, U1>>; self: Transform, rhs: Transform, Output = Transform; [val val] => #[allow(clippy::suspicious_arithmetic_impl)] { self * rhs.inverse() }; [ref val] => #[allow(clippy::suspicious_arithmetic_impl)] { self * rhs.inverse() }; @@ -628,8 +626,7 @@ md_assign_impl_all!( const D; for CA, CB; where Const: DimNameAdd, CA: SuperTCategoryOf, CB: SubTCategoryOf, - DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, - InnerOwned, U1>, DimNameSum, U1>>: Clone; + DefaultAllocator: Allocator, U1>, DimNameSum, U1>>; self: Transform, rhs: Transform; [val] => #[allow(clippy::suspicious_op_assign_impl)] { *self *= rhs.inverse() }; [ref] => #[allow(clippy::suspicious_op_assign_impl)] { *self *= rhs.clone().inverse() }; diff --git a/src/geometry/translation.rs b/src/geometry/translation.rs index 6f983fec..1dd6f6d5 100755 --- a/src/geometry/translation.rs +++ b/src/geometry/translation.rs @@ -15,13 +15,13 @@ use simba::scalar::{ClosedAdd, ClosedNeg, ClosedSub}; use crate::base::allocator::Allocator; use crate::base::dimension::{DimNameAdd, DimNameSum, U1}; -use crate::base::storage::InnerOwned; +use crate::base::storage::Owned; use crate::base::{Const, DefaultAllocator, OMatrix, SVector, Scalar}; use crate::geometry::Point; /// A translation. -#[repr(transparent)] +#[repr(C)] #[derive(Debug)] pub struct Translation { /// The translation coordinates, i.e., how much is added to a point's coordinates when it is @@ -29,20 +29,20 @@ pub struct Translation { pub vector: SVector, } -impl hash::Hash for Translation +impl hash::Hash for Translation where - InnerOwned>: hash::Hash, + Owned>: hash::Hash, { fn hash(&self, state: &mut H) { self.vector.hash(state) } } -impl Copy for Translation {} +impl Copy for Translation {} -impl Clone for Translation +impl Clone for Translation where - InnerOwned>: Clone, + Owned>: Clone, { #[inline] fn clone(&self) -> Self { @@ -69,6 +69,7 @@ where #[cfg(feature = "abomonation-serialize")] impl Abomonation for Translation where + T: Scalar, SVector: Abomonation, { unsafe fn entomb(&self, writer: &mut W) -> IOResult<()> { @@ -85,9 +86,9 @@ where } #[cfg(feature = "serde-serialize-no-std")] -impl Serialize for Translation +impl Serialize for Translation where - InnerOwned>: Serialize, + Owned>: Serialize, { fn serialize(&self, serializer: S) -> Result where @@ -98,9 +99,9 @@ where } #[cfg(feature = "serde-serialize-no-std")] -impl<'a, T, const D: usize> Deserialize<'a> for Translation +impl<'a, T: Scalar, const D: usize> Deserialize<'a> for Translation where - InnerOwned>: Deserialize<'a>, + Owned>: Deserialize<'a>, { fn deserialize(deserializer: Des) -> Result where @@ -155,7 +156,7 @@ mod rkyv_impl { } } -impl Translation { +impl Translation { /// Creates a new translation from the given vector. 
#[inline] #[deprecated(note = "Use `::from` instead.")] @@ -181,7 +182,7 @@ impl Translation { #[must_use = "Did you mean to use inverse_mut()?"] pub fn inverse(&self) -> Translation where - T: ClosedNeg + Scalar, + T: ClosedNeg, { Translation::from(-&self.vector) } @@ -208,7 +209,7 @@ impl Translation { #[must_use] pub fn to_homogeneous(&self) -> OMatrix, U1>, DimNameSum, U1>> where - T: Zero + One + Scalar, + T: Zero + One, Const: DimNameAdd, DefaultAllocator: Allocator, U1>, DimNameSum, U1>>, { @@ -239,7 +240,7 @@ impl Translation { #[inline] pub fn inverse_mut(&mut self) where - T: ClosedNeg + Scalar, + T: ClosedNeg, { self.vector.neg_mut() } @@ -279,16 +280,16 @@ impl Translation { } } -impl Eq for Translation {} +impl Eq for Translation {} -impl PartialEq for Translation { +impl PartialEq for Translation { #[inline] fn eq(&self, right: &Translation) -> bool { self.vector == right.vector } } -impl AbsDiffEq for Translation +impl AbsDiffEq for Translation where T::Epsilon: Copy, { @@ -305,7 +306,7 @@ where } } -impl RelativeEq for Translation +impl RelativeEq for Translation where T::Epsilon: Copy, { @@ -326,7 +327,7 @@ where } } -impl UlpsEq for Translation +impl UlpsEq for Translation where T::Epsilon: Copy, { diff --git a/src/geometry/translation_construction.rs b/src/geometry/translation_construction.rs index a9f501be..5371b648 100644 --- a/src/geometry/translation_construction.rs +++ b/src/geometry/translation_construction.rs @@ -1,5 +1,5 @@ #[cfg(feature = "arbitrary")] -use crate::base::storage::InnerOwned; +use crate::base::storage::Owned; #[cfg(feature = "arbitrary")] use quickcheck::{Arbitrary, Gen}; @@ -77,7 +77,7 @@ where #[cfg(feature = "arbitrary")] impl Arbitrary for Translation where - InnerOwned>: Send, + Owned>: Send, { #[inline] fn arbitrary(rng: &mut Gen) -> Self { diff --git a/src/geometry/translation_conversion.rs b/src/geometry/translation_conversion.rs index bed39f7a..d443a2f4 100644 --- a/src/geometry/translation_conversion.rs +++ b/src/geometry/translation_conversion.rs @@ -27,7 +27,8 @@ use crate::Point; impl SubsetOf> for Translation where - T2: SupersetOf, + T1: Scalar, + T2: Scalar + SupersetOf, { #[inline] fn to_superset(&self) -> Translation { @@ -192,14 +193,14 @@ where } } -impl From>> for Translation { +impl From>> for Translation { #[inline] fn from(vector: OVector>) -> Self { Translation { vector } } } -impl From<[T; D]> for Translation { +impl From<[T; D]> for Translation { #[inline] fn from(coords: [T; D]) -> Self { Translation { @@ -208,17 +209,14 @@ impl From<[T; D]> for Translation { } } -impl From> for Translation { +impl From> for Translation { #[inline] fn from(pt: Point) -> Self { Translation { vector: pt.coords } } } -impl From> for [T; D] -where - T: Clone, -{ +impl From> for [T; D] { #[inline] fn from(t: Translation) -> Self { t.vector.into() diff --git a/src/geometry/translation_coordinates.rs b/src/geometry/translation_coordinates.rs index bda57f59..80267e06 100644 --- a/src/geometry/translation_coordinates.rs +++ b/src/geometry/translation_coordinates.rs @@ -18,14 +18,14 @@ macro_rules! 
deref_impl( #[inline] fn deref(&self) -> &Self::Target { - unsafe { &*(self as *const _ as *const _) } + unsafe { &*(self as *const Translation as *const Self::Target) } } } impl DerefMut for Translation { #[inline] fn deref_mut(&mut self) -> &mut Self::Target { - unsafe { &mut *(self as *mut _ as *mut _) } + unsafe { &mut *(self as *mut Translation as *mut Self::Target) } } } } diff --git a/src/lib.rs b/src/lib.rs index e21f0709..650a601a 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -77,12 +77,12 @@ an optimized set of tools for computer graphics and physics. Those features incl unused_parens, unused_qualifications, unused_results, - missing_docs, rust_2018_idioms, rust_2018_compatibility, future_incompatible, missing_copy_implementations )] +// #![deny(missing_docs)] // XXX: deny that #![doc( html_favicon_url = "https://nalgebra.org/img/favicon.ico", html_root_url = "https://docs.rs/nalgebra/0.25.0" diff --git a/src/linalg/balancing.rs b/src/linalg/balancing.rs index f4f8b659..15679e2b 100644 --- a/src/linalg/balancing.rs +++ b/src/linalg/balancing.rs @@ -5,7 +5,6 @@ use std::ops::{DivAssign, MulAssign}; use crate::allocator::Allocator; use crate::base::dimension::Dim; -use crate::base::storage::Storage; use crate::base::{Const, DefaultAllocator, OMatrix, OVector}; /// Applies in-place a modified Parlett and Reinsch matrix balancing with 2-norm to the matrix and returns @@ -18,7 +17,7 @@ where { assert!(matrix.is_square(), "Unable to balance a non-square matrix."); - let dim = matrix.data.shape().0; + let dim = matrix.shape_generic().0; let radix: T = crate::convert(2.0f64); let mut d = OVector::from_element_generic(dim, Const::<1>, T::one()); diff --git a/src/linalg/bidiagonal.rs b/src/linalg/bidiagonal.rs index d4b6a1e3..e269b4a0 100644 --- a/src/linalg/bidiagonal.rs +++ b/src/linalg/bidiagonal.rs @@ -1,17 +1,14 @@ -use std::fmt; - #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Serialize}; use crate::allocator::Allocator; use crate::base::{DefaultAllocator, Matrix, OMatrix, OVector, Unit}; use crate::dimension::{Const, Dim, DimDiff, DimMin, DimMinimum, DimSub, U1}; -use crate::storage::{InnerOwned, Storage}; -use crate::Dynamic; use simba::scalar::ComplexField; use crate::geometry::Reflection; use crate::linalg::householder; +use std::mem::MaybeUninit; /// The bidiagonalization of a general matrix. 
#[cfg_attr(feature = "serde-serialize-no-std", derive(Serialize, Deserialize))] @@ -35,6 +32,7 @@ use crate::linalg::householder; OVector>: Deserialize<'de>, OVector, U1>>: Deserialize<'de>")) )] +#[derive(Clone, Debug)] pub struct Bidiagonal, C: Dim> where DimMinimum: DimSub, @@ -52,59 +50,17 @@ where upper_diagonal: bool, } -impl, C: Dim> Clone for Bidiagonal -where - DimMinimum: DimSub, - DefaultAllocator: Allocator - + Allocator> - + Allocator, U1>>, - InnerOwned: Clone, - InnerOwned>: Clone, - InnerOwned, U1>>: Clone, -{ - fn clone(&self) -> Self { - Self { - uv: self.uv.clone(), - diagonal: self.diagonal.clone(), - off_diagonal: self.off_diagonal.clone(), - upper_diagonal: self.upper_diagonal, - } - } -} - -/* impl, C: Dim> Copy for Bidiagonal where DimMinimum: DimSub, DefaultAllocator: Allocator + Allocator> + Allocator, U1>>, - InnerOwned: Copy, - InnerOwned>: Copy, - InnerOwned, U1>>: Copy, + OMatrix: Copy, + OVector>: Copy, + OVector, U1>>: Copy, { } -*/ - -impl, C: Dim> fmt::Debug for Bidiagonal -where - DimMinimum: DimSub, - DefaultAllocator: Allocator - + Allocator> - + Allocator, U1>>, - InnerOwned: fmt::Debug, - InnerOwned>: fmt::Debug, - InnerOwned, U1>>: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("Bidiagonal") - .field("uv", &self.uv) - .field("diagonal", &self.diagonal) - .field("off_diagonal", &self.off_diagonal) - .field("upper_diagonal", &self.upper_diagonal) - .finish() - } -} impl, C: Dim> Bidiagonal where @@ -117,7 +73,7 @@ where { /// Computes the Bidiagonal decomposition using householder reflections. pub fn new(mut matrix: OMatrix) -> Self { - let (nrows, ncols) = matrix.data.shape(); + let (nrows, ncols) = matrix.shape_generic(); let min_nrows_ncols = nrows.min(ncols); let dim = min_nrows_ncols.value(); assert!( @@ -125,80 +81,70 @@ where "Cannot compute the bidiagonalization of an empty matrix." ); - let mut diagonal = Matrix::new_uninitialized_generic(min_nrows_ncols, Const::<1>); - let mut off_diagonal = - Matrix::new_uninitialized_generic(min_nrows_ncols.sub(Const::<1>), Const::<1>); - let mut axis_packed = Matrix::new_uninitialized_generic(ncols, Const::<1>); - let mut work = Matrix::new_uninitialized_generic(nrows, Const::<1>); + let mut diagonal = Matrix::uninit(min_nrows_ncols, Const::<1>); + let mut off_diagonal = Matrix::uninit(min_nrows_ncols.sub(Const::<1>), Const::<1>); + let mut axis_packed = Matrix::zeros_generic(ncols, Const::<1>); + let mut work = Matrix::zeros_generic(nrows, Const::<1>); let upper_diagonal = nrows.value() >= ncols.value(); - - // Safety: all pointers involved are valid for writes, aligned, and uninitialized. 
- unsafe { - if upper_diagonal { - for ite in 0..dim - 1 { - householder::clear_column_unchecked( - &mut matrix, - diagonal[ite].as_mut_ptr(), - ite, - 0, - None, - ); - householder::clear_row_unchecked( - &mut matrix, - off_diagonal[ite].as_mut_ptr(), - &mut axis_packed, - &mut work, - ite, - 1, - ); - } - - householder::clear_column_unchecked( + if upper_diagonal { + for ite in 0..dim - 1 { + diagonal[ite] = MaybeUninit::new(householder::clear_column_unchecked( &mut matrix, - diagonal[dim - 1].as_mut_ptr(), - dim - 1, + ite, 0, None, - ); - } else { - for ite in 0..dim - 1 { - householder::clear_row_unchecked( - &mut matrix, - diagonal[ite].as_mut_ptr(), - &mut axis_packed, - &mut work, - ite, - 0, - ); - householder::clear_column_unchecked( - &mut matrix, - off_diagonal[ite].as_mut_ptr(), - ite, - 1, - None, - ); - } - - householder::clear_row_unchecked( + )); + off_diagonal[ite] = MaybeUninit::new(householder::clear_row_unchecked( &mut matrix, - diagonal[dim - 1].as_mut_ptr(), &mut axis_packed, &mut work, - dim - 1, - 0, - ); + ite, + 1, + )); } + + diagonal[dim - 1] = MaybeUninit::new(householder::clear_column_unchecked( + &mut matrix, + dim - 1, + 0, + None, + )); + } else { + for ite in 0..dim - 1 { + diagonal[ite] = MaybeUninit::new(householder::clear_row_unchecked( + &mut matrix, + &mut axis_packed, + &mut work, + ite, + 0, + )); + off_diagonal[ite] = MaybeUninit::new(householder::clear_column_unchecked( + &mut matrix, + ite, + 1, + None, + )); + } + + diagonal[dim - 1] = MaybeUninit::new(householder::clear_row_unchecked( + &mut matrix, + &mut axis_packed, + &mut work, + dim - 1, + 0, + )); } - // Safety: all values have been initialized. - unsafe { - Bidiagonal { - uv: matrix, - diagonal: diagonal.assume_init(), - off_diagonal: off_diagonal.assume_init(), - upper_diagonal, - } + // Safety: diagonal and off_diagonal have been fully initialized. 
+ let (diagonal, off_diagonal) = + unsafe { (diagonal.assume_init(), off_diagonal.assume_init()) }; + + Bidiagonal { + uv: matrix, + diagonal, + off_diagonal, + upper_diagonal, } } @@ -245,7 +191,7 @@ where where DefaultAllocator: Allocator, DimMinimum>, { - let (nrows, ncols) = self.uv.data.shape(); + let (nrows, ncols) = self.uv.shape_generic(); let d = nrows.min(ncols); let mut res = OMatrix::identity_generic(d, d); @@ -265,7 +211,7 @@ where where DefaultAllocator: Allocator>, { - let (nrows, ncols) = self.uv.data.shape(); + let (nrows, ncols) = self.uv.shape_generic(); let mut res = Matrix::identity_generic(nrows, nrows.min(ncols)); let dim = self.diagonal.len(); @@ -294,23 +240,21 @@ where #[must_use] pub fn v_t(&self) -> OMatrix, C> where - DefaultAllocator: Allocator, C> + Allocator, + DefaultAllocator: Allocator, C>, { - let (nrows, ncols) = self.uv.data.shape(); + let (nrows, ncols) = self.uv.shape_generic(); let min_nrows_ncols = nrows.min(ncols); let mut res = Matrix::identity_generic(min_nrows_ncols, ncols); - let mut work = Matrix::new_uninitialized_generic(min_nrows_ncols, Const::<1>); - let mut axis_packed = Matrix::new_uninitialized_generic(ncols, Const::<1>); + let mut work = Matrix::zeros_generic(min_nrows_ncols, Const::<1>); + let mut axis_packed = Matrix::zeros_generic(ncols, Const::<1>); let shift = self.axis_shift().1; for i in (0..min_nrows_ncols.value() - shift).rev() { let axis = self.uv.slice_range(i, i + shift..); let mut axis_packed = axis_packed.rows_range_mut(i + shift..); - axis_packed.tr_copy_init_from(&axis); - let axis_packed = unsafe { axis_packed.slice_assume_init() }; - + axis_packed.tr_copy_from(&axis); // TODO: sometimes, the axis might have a zero magnitude. let refl = Reflection::new(Unit::new_unchecked(axis_packed), T::zero()); @@ -404,7 +348,7 @@ where // assert!(self.uv.is_square(), "Bidiagonal inverse: unable to compute the inverse of a non-square matrix."); // // // TODO: is there a less naive method ? -// let (nrows, ncols) = self.uv.data.shape(); +// let (nrows, ncols) = self.uv.shape_generic(); // let mut res = OMatrix::identity_generic(nrows, ncols); // self.solve_mut(&mut res); // res diff --git a/src/linalg/cholesky.rs b/src/linalg/cholesky.rs index 2abd8242..47939311 100644 --- a/src/linalg/cholesky.rs +++ b/src/linalg/cholesky.rs @@ -1,6 +1,3 @@ -use std::fmt; -use std::mem::MaybeUninit; - #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Serialize}; @@ -12,7 +9,7 @@ use crate::allocator::Allocator; use crate::base::{Const, DefaultAllocator, Matrix, OMatrix, Vector}; use crate::constraint::{SameNumberOfRows, ShapeConstraint}; use crate::dimension::{Dim, DimAdd, DimDiff, DimSub, DimSum, U1}; -use crate::storage::{InnerOwned, Storage, StorageMut}; +use crate::storage::{Storage, StorageMut}; /// The Cholesky decomposition of a symmetric-definite-positive matrix. 
#[cfg_attr(feature = "serde-serialize-no-std", derive(Serialize, Deserialize))] @@ -26,6 +23,7 @@ use crate::storage::{InnerOwned, Storage, StorageMut}; serde(bound(deserialize = "DefaultAllocator: Allocator, OMatrix: Deserialize<'de>")) )] +#[derive(Clone, Debug)] pub struct Cholesky where DefaultAllocator: Allocator, @@ -33,38 +31,12 @@ where chol: OMatrix, } -/* impl Copy for Cholesky where DefaultAllocator: Allocator, - InnerOwned: Copy, + OMatrix: Copy, { } -*/ - -impl Clone for Cholesky -where - DefaultAllocator: Allocator, - InnerOwned: Clone, -{ - fn clone(&self) -> Self { - Self { - chol: self.chol.clone(), - } - } -} - -impl fmt::Debug for Cholesky -where - DefaultAllocator: Allocator, - InnerOwned: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("Cholesky") - .field("chol", &self.chol) - .finish() - } -} impl Cholesky where @@ -164,7 +136,7 @@ where /// Computes the inverse of the decomposed matrix. #[must_use] pub fn inverse(&self) -> OMatrix { - let shape = self.chol.data.shape(); + let shape = self.chol.shape_generic(); let mut res = OMatrix::identity_generic(shape.0, shape.1); self.solve_mut(&mut res); @@ -254,8 +226,6 @@ where DefaultAllocator: Allocator, DimSum> + Allocator, ShapeConstraint: SameNumberOfRows>, { - // TODO: check that MaybeUninit manipulations are sound! - let mut col = col.into_owned(); // for an explanation of the formulas, see https://en.wikipedia.org/wiki/Cholesky_decomposition#Updating_the_decomposition let n = col.nrows(); @@ -267,20 +237,19 @@ where assert!(j < n, "j needs to be within the bound of the new matrix."); // loads the data into a new matrix with an additional jth row/column - let mut chol = Matrix::new_uninitialized_generic( - self.chol.data.shape().0.add(Const::<1>), - self.chol.data.shape().1.add(Const::<1>), + // TODO: would it be worth it to avoid the zero-initialization? + let mut chol = Matrix::zeros_generic( + self.chol.shape_generic().0.add(Const::<1>), + self.chol.shape_generic().1.add(Const::<1>), ); - - // TODO: checked that every entry is initialized EXACTLY once. chol.slice_range_mut(..j, ..j) - .copy_init_from(&self.chol.slice_range(..j, ..j)); + .copy_from(&self.chol.slice_range(..j, ..j)); chol.slice_range_mut(..j, j + 1..) - .copy_init_from(&self.chol.slice_range(..j, j..)); + .copy_from(&self.chol.slice_range(..j, j..)); chol.slice_range_mut(j + 1.., ..j) - .copy_init_from(&self.chol.slice_range(j.., ..j)); + .copy_from(&self.chol.slice_range(j.., ..j)); chol.slice_range_mut(j + 1.., j + 1..) - .copy_init_from(&self.chol.slice_range(j.., j..)); + .copy_from(&self.chol.slice_range(j.., j..)); // update the jth row let top_left_corner = self.chol.slice_range(..j, ..j); @@ -296,7 +265,7 @@ where // update the center element let center_element = T::sqrt(col_j - T::from_real(new_rowj_adjoint.norm_squared())); - chol[(j, j)] = MaybeUninit::new(center_element); + chol[(j, j)] = center_element; // update the jth column let bottom_left_corner = self.chol.slice_range(j.., ..j); @@ -307,9 +276,7 @@ where &new_rowj_adjoint, T::one() / center_element, ); - chol.slice_range_mut(j + 1.., j).copy_init_from(&new_colj); - - let mut chol = unsafe { chol.assume_init() }; + chol.slice_range_mut(j + 1.., j).copy_from(&new_colj); // update the bottom right corner let mut bottom_right_corner = chol.slice_range_mut(j + 1.., j + 1..); @@ -330,27 +297,24 @@ where D: DimSub, DefaultAllocator: Allocator, DimDiff> + Allocator, { - // TODO: check that MaybeUninit manipulations are sound! 
- let n = self.chol.nrows(); assert!(n > 0, "The matrix needs at least one column."); assert!(j < n, "j needs to be within the bound of the matrix."); // loads the data into a new matrix except for the jth row/column - let mut chol = Matrix::new_uninitialized_generic( - self.chol.data.shape().0.sub(Const::<1>), - self.chol.data.shape().1.sub(Const::<1>), + // TODO: would it be worth it to avoid this zero initialization? + let mut chol = Matrix::zeros_generic( + self.chol.shape_generic().0.sub(Const::<1>), + self.chol.shape_generic().1.sub(Const::<1>), ); - chol.slice_range_mut(..j, ..j) - .copy_init_from(&self.chol.slice_range(..j, ..j)); + .copy_from(&self.chol.slice_range(..j, ..j)); chol.slice_range_mut(..j, j..) - .copy_init_from(&self.chol.slice_range(..j, j + 1..)); + .copy_from(&self.chol.slice_range(..j, j + 1..)); chol.slice_range_mut(j.., ..j) - .copy_init_from(&self.chol.slice_range(j + 1.., ..j)); + .copy_from(&self.chol.slice_range(j + 1.., ..j)); chol.slice_range_mut(j.., j..) - .copy_init_from(&self.chol.slice_range(j + 1.., j + 1..)); - let mut chol = unsafe { chol.assume_init() }; + .copy_from(&self.chol.slice_range(j + 1.., j + 1..)); // updates the bottom right corner let mut bottom_right_corner = chol.slice_range_mut(j.., j..); @@ -366,12 +330,14 @@ where /// /// This helper method is called by `rank_one_update` but also `insert_column` and `remove_column` /// where it is used on a square slice of the decomposition - fn xx_rank_one_update( + fn xx_rank_one_update( chol: &mut Matrix, x: &mut Vector, sigma: T::RealField, ) where //T: ComplexField, + Dm: Dim, + Rx: Dim, Sm: StorageMut, Sx: StorageMut, { diff --git a/src/linalg/col_piv_qr.rs b/src/linalg/col_piv_qr.rs index 438ee83a..f5c61336 100644 --- a/src/linalg/col_piv_qr.rs +++ b/src/linalg/col_piv_qr.rs @@ -6,11 +6,12 @@ use crate::allocator::{Allocator, Reallocator}; use crate::base::{Const, DefaultAllocator, Matrix, OMatrix, OVector, Unit}; use crate::constraint::{SameNumberOfRows, ShapeConstraint}; use crate::dimension::{Dim, DimMin, DimMinimum}; -use crate::storage::{Storage, StorageMut}; +use crate::storage::StorageMut; use crate::ComplexField; use crate::geometry::Reflection; use crate::linalg::{householder, PermutationSequence}; +use std::mem::MaybeUninit; /// The QR decomposition (with column pivoting) of a general matrix. #[cfg_attr(feature = "serde-serialize-no-std", derive(Serialize, Deserialize))] @@ -30,6 +31,7 @@ use crate::linalg::{householder, PermutationSequence}; PermutationSequence>: Deserialize<'de>, OVector>: Deserialize<'de>")) )] +#[derive(Clone, Debug)] pub struct ColPivQR, C: Dim> where DefaultAllocator: Allocator @@ -52,24 +54,6 @@ where { } -impl, C: Dim> Clone for ColPivQR -where - DefaultAllocator: Allocator - + Allocator> - + Allocator<(usize, usize), DimMinimum>, - OMatrix: Clone, - PermutationSequence>: Clone, - OVector>: Clone, -{ - fn clone(&self) -> Self { - Self { - col_piv_qr: self.col_piv_qr.clone(), - p: self.p.clone(), - diag: self.diag.clone(), - } - } -} - impl, C: Dim> ColPivQR where DefaultAllocator: Allocator @@ -79,42 +63,37 @@ where { /// Computes the `ColPivQR` decomposition using householder reflections. 
pub fn new(mut matrix: OMatrix) -> Self { - let (nrows, ncols) = matrix.data.shape(); + let (nrows, ncols) = matrix.shape_generic(); let min_nrows_ncols = nrows.min(ncols); let mut p = PermutationSequence::identity_generic(min_nrows_ncols); - let mut diag = Matrix::new_uninitialized_generic(min_nrows_ncols, Const::<1>); - if min_nrows_ncols.value() == 0 { - // Safety: there's no (uninitialized) values. - unsafe { - return ColPivQR { - col_piv_qr: matrix, - p, - diag: diag.assume_init(), - }; + return ColPivQR { + col_piv_qr: matrix, + p, + diag: Matrix::zeros_generic(min_nrows_ncols, Const::<1>), }; } + let mut diag = Matrix::uninit(min_nrows_ncols, Const::<1>); + for i in 0..min_nrows_ncols.value() { let piv = matrix.slice_range(i.., i..).icamax_full(); let col_piv = piv.1 + i; matrix.swap_columns(i, col_piv); p.append_permutation(i, col_piv); - // Safety: the pointer is valid for writes, aligned, and uninitialized. - unsafe { - householder::clear_column_unchecked(&mut matrix, diag[i].as_mut_ptr(), i, 0, None); - } + diag[i] = + MaybeUninit::new(householder::clear_column_unchecked(&mut matrix, i, 0, None)); } - // Safety: all values have been initialized. - unsafe { - ColPivQR { - col_piv_qr: matrix, - p, - diag: diag.assume_init(), - } + // Safety: diag is now fully initialized. + let diag = unsafe { diag.assume_init() }; + + ColPivQR { + col_piv_qr: matrix, + p, + diag, } } @@ -125,7 +104,7 @@ where where DefaultAllocator: Allocator, C>, { - let (nrows, ncols) = self.col_piv_qr.data.shape(); + let (nrows, ncols) = self.col_piv_qr.shape_generic(); let mut res = self .col_piv_qr .rows_generic(0, nrows.min(ncols)) @@ -142,7 +121,7 @@ where where DefaultAllocator: Reallocator, C>, { - let (nrows, ncols) = self.col_piv_qr.data.shape(); + let (nrows, ncols) = self.col_piv_qr.shape_generic(); let mut res = self .col_piv_qr .resize_generic(nrows.min(ncols), ncols, T::zero()); @@ -157,7 +136,7 @@ where where DefaultAllocator: Allocator>, { - let (nrows, ncols) = self.col_piv_qr.data.shape(); + let (nrows, ncols) = self.col_piv_qr.shape_generic(); // NOTE: we could build the identity matrix and call q_mul on it. // Instead we don't so that we take in account the matrix sparseness. @@ -320,7 +299,7 @@ where ); // TODO: is there a less naive method ? 
- let (nrows, ncols) = self.col_piv_qr.data.shape(); + let (nrows, ncols) = self.col_piv_qr.shape_generic(); let mut res = OMatrix::identity_generic(nrows, ncols); if self.solve_mut(&mut res) { diff --git a/src/linalg/convolution.rs b/src/linalg/convolution.rs index 36cea3a0..21a32dbc 100644 --- a/src/linalg/convolution.rs +++ b/src/linalg/convolution.rs @@ -38,7 +38,7 @@ impl> Vector { .data .shape() .0 - .add(kernel.data.shape().0) + .add(kernel.shape_generic().0) .sub(Const::<1>); let mut conv = OVector::zeros_generic(result_len, Const::<1>); @@ -92,7 +92,7 @@ impl> Vector { .shape() .0 .add(Const::<1>) - .sub(kernel.data.shape().0); + .sub(kernel.shape_generic().0); let mut conv = OVector::zeros_generic(result_len, Const::<1>); for i in 0..(vec - ker + 1) { @@ -126,7 +126,7 @@ impl> Vector { panic!("convolve_same expects `self.len() >= kernel.len() > 0`, received {} and {} respectively.",vec,ker); } - let mut conv = OVector::zeros_generic(self.data.shape().0, Const::<1>); + let mut conv = OVector::zeros_generic(self.shape_generic().0, Const::<1>); for i in 0..vec { for j in 0..ker { diff --git a/src/linalg/exp.rs b/src/linalg/exp.rs index 76e2ddf5..e7751af2 100644 --- a/src/linalg/exp.rs +++ b/src/linalg/exp.rs @@ -4,12 +4,9 @@ use crate::{ base::{ allocator::Allocator, dimension::{Const, Dim, DimMin, DimMinimum}, - storage::Storage, DefaultAllocator, }, - convert, - storage::InnerOwned, - try_convert, ComplexField, OMatrix, RealField, + convert, try_convert, ComplexField, OMatrix, RealField, }; use crate::num::Zero; @@ -49,7 +46,7 @@ where DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, { fn new(a: OMatrix, use_exact_norm: bool) -> Self { - let (nrows, ncols) = a.data.shape(); + let (nrows, ncols) = a.shape_generic(); ExpmPadeHelper { use_exact_norm, ident: OMatrix::::identity_generic(nrows, ncols), @@ -350,7 +347,7 @@ where D: Dim, DefaultAllocator: Allocator + Allocator, { - let nrows = a.data.shape().0; + let nrows = a.shape_generic().0; let mut v = crate::OVector::::repeat_generic(nrows, Const::<1>, convert(1.0)); let m = a.transpose(); @@ -435,7 +432,6 @@ where + Allocator + Allocator + Allocator, - InnerOwned: Clone, { /// Computes exponential of this matrix #[must_use] diff --git a/src/linalg/full_piv_lu.rs b/src/linalg/full_piv_lu.rs index 71e0755e..20033c3c 100644 --- a/src/linalg/full_piv_lu.rs +++ b/src/linalg/full_piv_lu.rs @@ -1,5 +1,3 @@ -use std::fmt; - #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Serialize}; @@ -29,7 +27,8 @@ use crate::linalg::PermutationSequence; OMatrix: Deserialize<'de>, PermutationSequence>: Deserialize<'de>")) )] -pub struct FullPivLU, C: Dim> +#[derive(Clone, Debug)] +pub struct FullPivLU, C: Dim> where DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, { @@ -41,41 +40,11 @@ where impl, C: Dim> Copy for FullPivLU where DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, - PermutationSequence>: Copy, OMatrix: Copy, + PermutationSequence>: Copy, { } -impl, C: Dim> Clone for FullPivLU -where - DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, - PermutationSequence>: Clone, - OMatrix: Clone, -{ - fn clone(&self) -> Self { - Self { - lu: self.lu.clone(), - p: self.p.clone(), - q: self.q.clone(), - } - } -} - -impl, C: Dim> fmt::Debug for FullPivLU -where - DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, - PermutationSequence>: fmt::Debug, - OMatrix: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - 
f.debug_struct("FullPivLU") - .field("lu", &self.lu) - .field("p", &self.p) - .field("q", &self.q) - .finish() - } -} - impl, C: Dim> FullPivLU where DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, @@ -84,7 +53,7 @@ where /// /// This effectively computes `P, L, U, Q` such that `P * matrix * Q = LU`. pub fn new(mut matrix: OMatrix) -> Self { - let (nrows, ncols) = matrix.data.shape(); + let (nrows, ncols) = matrix.shape_generic(); let min_nrows_ncols = nrows.min(ncols); let mut p = PermutationSequence::identity_generic(min_nrows_ncols); @@ -132,7 +101,7 @@ where where DefaultAllocator: Allocator>, { - let (nrows, ncols) = self.lu.data.shape(); + let (nrows, ncols) = self.lu.shape_generic(); let mut m = self.lu.columns_generic(0, nrows.min(ncols)).into_owned(); m.fill_upper_triangle(T::zero(), 1); m.fill_diagonal(T::one()); @@ -146,7 +115,7 @@ where where DefaultAllocator: Allocator, C>, { - let (nrows, ncols) = self.lu.data.shape(); + let (nrows, ncols) = self.lu.shape_generic(); self.lu.rows_generic(0, nrows.min(ncols)).upper_triangle() } @@ -253,7 +222,7 @@ where "FullPivLU inverse: unable to compute the inverse of a non-square matrix." ); - let (nrows, ncols) = self.lu.data.shape(); + let (nrows, ncols) = self.lu.shape_generic(); let mut res = OMatrix::identity_generic(nrows, ncols); if self.solve_mut(&mut res) { diff --git a/src/linalg/hessenberg.rs b/src/linalg/hessenberg.rs index 3874bf77..1e266b16 100644 --- a/src/linalg/hessenberg.rs +++ b/src/linalg/hessenberg.rs @@ -1,17 +1,14 @@ -use std::fmt; -use std::mem::MaybeUninit; - #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Serialize}; use crate::allocator::Allocator; use crate::base::{DefaultAllocator, OMatrix, OVector}; use crate::dimension::{Const, DimDiff, DimSub, U1}; -use crate::storage::{InnerOwned, Storage}; -use crate::Matrix; use simba::scalar::ComplexField; use crate::linalg::householder; +use crate::Matrix; +use std::mem::MaybeUninit; /// Hessenberg decomposition of a general matrix. #[cfg_attr(feature = "serde-serialize-no-std", derive(Serialize, Deserialize))] @@ -29,6 +26,7 @@ use crate::linalg::householder; OMatrix: Deserialize<'de>, OVector>: Deserialize<'de>")) )] +#[derive(Clone, Debug)] pub struct Hessenberg> where DefaultAllocator: Allocator + Allocator>, @@ -37,43 +35,13 @@ where subdiag: OVector>, } -/* impl> Copy for Hessenberg where DefaultAllocator: Allocator + Allocator>, - InnerOwned: Copy, - InnerOwned>: Copy, + OMatrix: Copy, + OVector>: Copy, { } -*/ - -impl> Clone for Hessenberg -where - DefaultAllocator: Allocator + Allocator>, - InnerOwned: Clone, - InnerOwned>: Clone, -{ - fn clone(&self) -> Self { - Self { - hess: self.hess.clone(), - subdiag: self.subdiag.clone(), - } - } -} - -impl> fmt::Debug for Hessenberg -where - DefaultAllocator: Allocator + Allocator>, - InnerOwned: fmt::Debug, - InnerOwned>: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("Hessenberg") - .field("hess", &self.hess) - .field("subdiag", &self.subdiag) - .finish() - } -} impl> Hessenberg where @@ -81,7 +49,7 @@ where { /// Computes the Hessenberg decomposition using householder reflections. 
pub fn new(hess: OMatrix) -> Self { - let mut work = OVector::new_uninitialized_generic(hess.data.shape().0, Const::<1>); + let mut work = Matrix::zeros_generic(hess.shape_generic().0, Const::<1>); Self::new_with_workspace(hess, &mut work) } @@ -89,16 +57,13 @@ where /// /// The workspace containing `D` elements must be provided but its content does not have to be /// initialized. - pub fn new_with_workspace( - mut hess: OMatrix, - work: &mut OVector, D>, - ) -> Self { + pub fn new_with_workspace(mut hess: OMatrix, work: &mut OVector) -> Self { assert!( hess.is_square(), "Cannot compute the hessenberg decomposition of a non-square matrix." ); - let dim = hess.data.shape().0; + let dim = hess.shape_generic().0; assert!( dim.value() != 0, @@ -110,38 +75,27 @@ where "Hessenberg: invalid workspace size." ); - let mut subdiag = Matrix::new_uninitialized_generic(dim.sub(Const::<1>), Const::<1>); - if dim.value() == 0 { - // Safety: there's no (uninitialized) values. - unsafe { - return Self { - hess, - subdiag: subdiag.assume_init(), - }; - } + return Hessenberg { + hess, + subdiag: Matrix::zeros_generic(dim.sub(Const::<1>), Const::<1>), + }; } + let mut subdiag = Matrix::uninit(dim.sub(Const::<1>), Const::<1>); + for ite in 0..dim.value() - 1 { - // Safety: the pointer is valid for writes, aligned, and uninitialized. - unsafe { - householder::clear_column_unchecked( - &mut hess, - subdiag[ite].as_mut_ptr(), - ite, - 1, - Some(work), - ); - } + subdiag[ite] = MaybeUninit::new(householder::clear_column_unchecked( + &mut hess, + ite, + 1, + Some(work), + )); } - // Safety: all values have been initialized. - unsafe { - Self { - hess, - subdiag: subdiag.assume_init(), - } - } + // Safety: subdiag is now fully initialized. + let subdiag = unsafe { subdiag.assume_init() }; + Hessenberg { hess, subdiag } } /// Retrieves `(q, h)` with `q` the orthogonal matrix of this decomposition and `h` the @@ -170,10 +124,7 @@ where /// This is less efficient than `.unpack_h()` as it allocates a new matrix. #[inline] #[must_use] - pub fn h(&self) -> OMatrix - where - InnerOwned: Clone, - { + pub fn h(&self) -> OMatrix { let dim = self.hess.nrows(); let mut res = self.hess.clone(); res.fill_lower_triangle(T::zero(), 2); diff --git a/src/linalg/householder.rs b/src/linalg/householder.rs index 06a50d8e..6d20205d 100644 --- a/src/linalg/householder.rs +++ b/src/linalg/householder.rs @@ -1,11 +1,9 @@ //! Construction of householder elementary reflections. -use std::mem::MaybeUninit; - use crate::allocator::Allocator; use crate::base::{DefaultAllocator, OMatrix, OVector, Unit, Vector}; use crate::dimension::Dim; -use crate::storage::{Storage, StorageMut}; +use crate::storage::StorageMut; use num::Zero; use simba::scalar::ComplexField; @@ -46,29 +44,22 @@ pub fn reflection_axis_mut>( /// Uses an householder reflection to zero out the `icol`-th column, starting with the `shift + 1`-th /// subdiagonal element. /// -/// # Safety -/// Behavior is undefined if any of the following conditions are violated: -/// -/// - `diag_elt` must be valid for writes. -/// - `diag_elt` must be properly aligned. -/// -/// Furthermore, if `diag_elt` was previously initialized, this method will leak -/// its data. +/// Returns the signed norm of the column. 
#[doc(hidden)] -pub unsafe fn clear_column_unchecked( +#[must_use] +pub fn clear_column_unchecked( matrix: &mut OMatrix, - diag_elt: *mut T, icol: usize, shift: usize, - bilateral: Option<&mut OVector, R>>, -) where + bilateral: Option<&mut OVector>, +) -> T +where DefaultAllocator: Allocator + Allocator, { let (mut left, mut right) = matrix.columns_range_pair_mut(icol, icol + 1..); let mut axis = left.rows_range_mut(icol + shift..); let (reflection_norm, not_zero) = reflection_axis_mut(&mut axis); - diag_elt.write(reflection_norm); if not_zero { let refl = Reflection::new(Unit::new_unchecked(axis), T::zero()); @@ -78,38 +69,32 @@ pub unsafe fn clear_column_unchecked( } refl.reflect_with_sign(&mut right.rows_range_mut(icol + shift..), sign.conjugate()); } + + reflection_norm } /// Uses an householder reflection to zero out the `irow`-th row, ending before the `shift + 1`-th /// superdiagonal element. /// -/// # Safety -/// Behavior is undefined if any of the following conditions are violated: -/// -/// - `diag_elt` must be valid for writes. -/// - `diag_elt` must be properly aligned. -/// -/// Furthermore, if `diag_elt` was previously initialized, this method will leak -/// its data. +/// Returns the signed norm of the row. #[doc(hidden)] -pub unsafe fn clear_row_unchecked( +#[must_use] +pub fn clear_row_unchecked( matrix: &mut OMatrix, - diag_elt: *mut T, - axis_packed: &mut OVector, C>, - work: &mut OVector, R>, + axis_packed: &mut OVector, + work: &mut OVector, irow: usize, shift: usize, -) where +) -> T +where DefaultAllocator: Allocator + Allocator + Allocator, { let (mut top, mut bottom) = matrix.rows_range_pair_mut(irow, irow + 1..); let mut axis = axis_packed.rows_range_mut(irow + shift..); - axis.tr_copy_init_from(&top.columns_range(irow + shift..)); - let mut axis = axis.assume_init_mut(); + axis.tr_copy_from(&top.columns_range(irow + shift..)); let (reflection_norm, not_zero) = reflection_axis_mut(&mut axis); axis.conjugate_mut(); // So that reflect_rows actually cancels the first row. - diag_elt.write(reflection_norm); if not_zero { let refl = Reflection::new(Unit::new_unchecked(axis), T::zero()); @@ -123,6 +108,8 @@ pub unsafe fn clear_row_unchecked( } else { top.columns_range_mut(irow + shift..).tr_copy_from(&axis); } + + reflection_norm } /// Computes the orthogonal transformation described by the elementary reflector axii stored on @@ -134,7 +121,7 @@ where DefaultAllocator: Allocator, { assert!(m.is_square()); let dim = m.shape_generic().0; // NOTE: we could build the identity matrix and call p_mult on it. // Instead we don't so that we take in account the matrix sparseness.
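The hunks above replace the old out-pointer contract (`diag_elt: *mut T`, with its `# Safety` obligations) by a plain return value that the callers in `qr.rs`, `col_piv_qr.rs`, `hessenberg.rs`, and `bidiagonal.rs` store into a `MaybeUninit` slot before a final `assume_init`. A minimal standalone sketch of that initialize-then-`assume_init` idiom, using plain `Vec`/`f64` rather than nalgebra's `Matrix::uninit` buffers (all names here are illustrative, not part of the crate):

```rust
use std::mem::MaybeUninit;

// Fill every slot exactly once with the helper's return value, then
// materialize the buffer -- the same shape as
// `diag[i] = MaybeUninit::new(householder::clear_column_unchecked(...))` above.
fn collect_norms(columns: &[Vec<f64>]) -> Vec<f64> {
    let mut norms: Vec<MaybeUninit<f64>> = Vec::with_capacity(columns.len());
    norms.resize_with(columns.len(), MaybeUninit::uninit);

    for (slot, col) in norms.iter_mut().zip(columns) {
        // Returning the value and letting the caller store it removes the
        // need for a raw `*mut T` out-parameter and its safety contract.
        *slot = MaybeUninit::new(col.iter().map(|x| x * x).sum::<f64>().sqrt());
    }

    // Safety: the loop above initialized every element.
    norms.into_iter().map(|n| unsafe { n.assume_init() }).collect()
}

fn main() {
    assert_eq!(collect_norms(&[vec![3.0, 4.0], vec![1.0, 0.0]]), vec![5.0, 1.0]);
}
```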
diff --git a/src/linalg/lu.rs b/src/linalg/lu.rs index 6fc0d9fa..0e3be559 100644 --- a/src/linalg/lu.rs +++ b/src/linalg/lu.rs @@ -1,6 +1,3 @@ -use std::fmt; -use std::mem; - #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Serialize}; @@ -8,8 +5,9 @@ use crate::allocator::{Allocator, Reallocator}; use crate::base::{DefaultAllocator, Matrix, OMatrix, Scalar}; use crate::constraint::{SameNumberOfRows, ShapeConstraint}; use crate::dimension::{Dim, DimMin, DimMinimum}; -use crate::storage::{InnerOwned, Storage, StorageMut}; +use crate::storage::{Storage, StorageMut}; use simba::scalar::{ComplexField, Field}; +use std::mem; use crate::linalg::PermutationSequence; @@ -29,7 +27,8 @@ use crate::linalg::PermutationSequence; OMatrix: Deserialize<'de>, PermutationSequence>: Deserialize<'de>")) )] -pub struct LU, C: Dim> +#[derive(Clone, Debug)] +pub struct LU, C: Dim> where DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, { @@ -37,43 +36,13 @@ where p: PermutationSequence>, } -/* -impl, C: Dim> Copy for LU +impl, C: Dim> Copy for LU where DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, + OMatrix: Copy, PermutationSequence>: Copy, - InnerOwned: Copy, { } -*/ - -impl, C: Dim> Clone for LU -where - DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, - PermutationSequence>: Clone, - InnerOwned: Clone, -{ - fn clone(&self) -> Self { - Self { - lu: self.lu.clone(), - p: self.p.clone(), - } - } -} - -impl, C: Dim> fmt::Debug for LU -where - DefaultAllocator: Allocator + Allocator<(usize, usize), DimMinimum>, - PermutationSequence>: fmt::Debug, - InnerOwned: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("LU") - .field("lu", &self.lu) - .field("p", &self.p) - .finish() - } -} /// Performs a LU decomposition to overwrite `out` with the inverse of `matrix`. /// @@ -121,7 +90,7 @@ where { /// Computes the LU decomposition with partial (row) pivoting of `matrix`. pub fn new(mut matrix: OMatrix) -> Self { - let (nrows, ncols) = matrix.data.shape(); + let (nrows, ncols) = matrix.shape_generic(); let min_nrows_ncols = nrows.min(ncols); let mut p = PermutationSequence::identity_generic(min_nrows_ncols); @@ -163,7 +132,7 @@ where where DefaultAllocator: Allocator>, { - let (nrows, ncols) = self.lu.data.shape(); + let (nrows, ncols) = self.lu.shape_generic(); let mut m = self.lu.columns_generic(0, nrows.min(ncols)).into_owned(); m.fill_upper_triangle(T::zero(), 1); m.fill_diagonal(T::one()); @@ -180,7 +149,7 @@ where where DefaultAllocator: Reallocator>, { - let (nrows, ncols) = self.lu.data.shape(); + let (nrows, ncols) = self.lu.shape_generic(); let mut m = self.lu.resize_generic(nrows, nrows.min(ncols), T::zero()); m.fill_upper_triangle(T::zero(), 1); m.fill_diagonal(T::one()); @@ -193,7 +162,7 @@ where where DefaultAllocator: Reallocator>, { - let (nrows, ncols) = self.lu.data.shape(); + let (nrows, ncols) = self.lu.shape_generic(); let mut m = self.lu.resize_generic(nrows, nrows.min(ncols), T::zero()); m.fill_upper_triangle(T::zero(), 1); m.fill_diagonal(T::one()); @@ -207,7 +176,7 @@ where where DefaultAllocator: Allocator, C>, { - let (nrows, ncols) = self.lu.data.shape(); + let (nrows, ncols) = self.lu.shape_generic(); self.lu.rows_generic(0, nrows.min(ncols)).upper_triangle() } @@ -299,7 +268,7 @@ where "LU inverse: unable to compute the inverse of a non-square matrix." 
); - let (nrows, ncols) = self.lu.data.shape(); + let (nrows, ncols) = self.lu.shape_generic(); let mut res = OMatrix::identity_generic(nrows, ncols); if self.try_inverse_to(&mut res) { Some(res) diff --git a/src/linalg/permutation_sequence.rs b/src/linalg/permutation_sequence.rs index 14ff718d..f4521988 100644 --- a/src/linalg/permutation_sequence.rs +++ b/src/linalg/permutation_sequence.rs @@ -1,6 +1,3 @@ -use std::fmt; -use std::mem::MaybeUninit; - #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Serialize}; @@ -11,10 +8,8 @@ use crate::allocator::Allocator; use crate::base::{DefaultAllocator, Matrix, OVector, Scalar}; #[cfg(any(feature = "std", feature = "alloc"))] use crate::dimension::Dynamic; -use crate::dimension::{Dim, DimName}; -use crate::iter::MatrixIter; -use crate::storage::{InnerOwned, StorageMut}; -use crate::{Const, U1}; +use crate::dimension::{Const, Dim, DimName}; +use crate::storage::StorageMut; /// A sequence of row or column permutations. #[cfg_attr(feature = "serde-serialize-no-std", derive(Serialize, Deserialize))] #[cfg_attr( @@ -28,47 +23,22 @@ use crate::{Const, U1}; serde(bound(deserialize = "DefaultAllocator: Allocator<(usize, usize), D>, OVector<(usize, usize), D>: Deserialize<'de>")) )] +#[derive(Clone, Debug)] pub struct PermutationSequence where DefaultAllocator: Allocator<(usize, usize), D>, { len: usize, - ipiv: OVector, D>, + ipiv: OVector<(usize, usize), D>, } impl Copy for PermutationSequence where DefaultAllocator: Allocator<(usize, usize), D>, - OVector, D>: Copy, + OVector<(usize, usize), D>: Copy, { } -impl Clone for PermutationSequence -where - DefaultAllocator: Allocator<(usize, usize), D>, - OVector, D>: Clone, -{ - fn clone(&self) -> Self { - Self { - len: self.len, - ipiv: self.ipiv.clone(), - } - } -} - -impl fmt::Debug for PermutationSequence -where - DefaultAllocator: Allocator<(usize, usize), D>, - OVector, D>: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("PermutationSequence") - .field("len", &self.len) - .field("ipiv", &self.ipiv) - .finish() - } -} - impl PermutationSequence where DefaultAllocator: Allocator<(usize, usize), D>, @@ -101,7 +71,9 @@ where pub fn identity_generic(dim: D) -> Self { Self { len: 0, - ipiv: OVector::new_uninitialized_generic(dim, Const::<1>), + // TODO: using an uninitialized matrix would save some computation, but + // that looks difficult to set up with MaybeUninit. + ipiv: Matrix::repeat_generic(dim, Const::<1>, (0, 0)), } } @@ -114,7 +86,7 @@ where self.len < self.ipiv.len(), "Maximum number of permutations exceeded."
); - self.ipiv[self.len] = MaybeUninit::new((i, i2)); + self.ipiv[self.len] = (i, i2); self.len += 1; } } @@ -125,8 +97,8 @@ where where S2: StorageMut, { - for perm in self.iter() { - rhs.swap_rows(perm.0, perm.1) + for i in self.ipiv.rows_range(..self.len).iter() { + rhs.swap_rows(i.0, i.1) } } @@ -136,8 +108,8 @@ where where S2: StorageMut, { - for perm in self.iter().rev() { - let (i1, i2) = perm; + for i in 0..self.len { + let (i1, i2) = self.ipiv[self.len - i - 1]; rhs.swap_rows(i1, i2) } } @@ -148,8 +120,8 @@ where where S2: StorageMut, { - for perm in self.iter() { - rhs.swap_columns(perm.0, perm.1) + for i in self.ipiv.rows_range(..self.len).iter() { + rhs.swap_columns(i.0, i.1) } } @@ -161,8 +133,8 @@ where ) where S2: StorageMut, { - for perm in self.iter().rev() { - let (i1, i2) = perm; + for i in 0..self.len { + let (i1, i2) = self.ipiv[self.len - i - 1]; rhs.swap_columns(i1, i2) } } @@ -189,27 +161,4 @@ where -T::one() } } - - /// Iterates over the permutations that have been initialized. - pub fn iter( - &self, - ) -> std::iter::Map< - std::iter::Copied< - std::iter::Take< - MatrixIter< - MaybeUninit<(usize, usize)>, - D, - U1, - InnerOwned, D, U1>, - >, - >, - >, - impl FnMut(MaybeUninit<(usize, usize)>) -> (usize, usize), - > { - self.ipiv - .iter() - .take(self.len) - .copied() - .map(|e| unsafe { e.assume_init() }) - } } diff --git a/src/linalg/pow.rs b/src/linalg/pow.rs index 000dc8b8..df513643 100644 --- a/src/linalg/pow.rs +++ b/src/linalg/pow.rs @@ -40,31 +40,19 @@ where // We use the buffer to hold the result of multiplier ^ 2, thus avoiding // extra allocations. - let (nrows, ncols) = self.data.shape(); let mut multiplier = self.clone_owned(); - let mut buf = Matrix::new_uninitialized_generic(nrows, ncols); + let mut buf = self.clone_owned(); // Exponentiation by squares. loop { if e % two == one { - let init_buf = self.mul_to(&multiplier, &mut buf); - self.copy_from(&init_buf); - - // Safety: `mul_to` leaves `buf` completely initialized. - unsafe { - buf.reinitialize(); - } + self.mul_to(&multiplier, &mut buf); + self.copy_from(&buf); } e /= two; - - let init_buf = multiplier.mul_to(&multiplier, &mut buf); - multiplier.copy_from(&init_buf); - - // Safety: `mul_to` leaves `buf` completely initialized. - unsafe { - buf.reinitialize(); - } + multiplier.mul_to(&multiplier, &mut buf); + multiplier.copy_from(&buf); if e == zero { return true; diff --git a/src/linalg/qr.rs b/src/linalg/qr.rs index e4a4911b..e2f8e0c3 100644 --- a/src/linalg/qr.rs +++ b/src/linalg/qr.rs @@ -1,5 +1,3 @@ -use std::fmt; - use num::Zero; #[cfg(feature = "serde-serialize-no-std")] use serde::{Deserialize, Serialize}; @@ -8,11 +6,12 @@ use crate::allocator::{Allocator, Reallocator}; use crate::base::{DefaultAllocator, Matrix, OMatrix, OVector, Unit}; use crate::constraint::{SameNumberOfRows, ShapeConstraint}; use crate::dimension::{Const, Dim, DimMin, DimMinimum}; -use crate::storage::{InnerOwned, Storage, StorageMut}; +use crate::storage::{Storage, StorageMut}; use simba::scalar::ComplexField; use crate::geometry::Reflection; use crate::linalg::householder; +use std::mem::MaybeUninit; /// The QR decomposition of a general matrix. 
 
 /// The QR decomposition of a general matrix.
 #[cfg_attr(feature = "serde-serialize-no-std", derive(Serialize, Deserialize))]
@@ -30,8 +29,8 @@ use crate::linalg::householder;
                          OMatrix<T, R, C>: Deserialize<'de>,
                          OVector<T, DimMinimum<R, C>>: Deserialize<'de>"))
 )]
-
-pub struct QR<T: ComplexField, R: DimMin<C>, C: Dim>
+#[derive(Clone, Debug)]
+pub struct QR<T: ComplexField, R: DimMin<C>, C: Dim>
 where
     DefaultAllocator: Allocator<T, R, C> + Allocator<T, DimMinimum<R, C>>,
 {
@@ -39,43 +38,13 @@ where
     diag: OVector<T, DimMinimum<R, C>>,
 }
 
-/*
-impl<T: ComplexField, R: DimMin<C>, C: Dim> Copy for QR<T, R, C>
+impl<T: ComplexField, R: DimMin<C>, C: Dim> Copy for QR<T, R, C>
 where
     DefaultAllocator: Allocator<T, R, C> + Allocator<T, DimMinimum<R, C>>,
-    InnerOwned<T, R, C>: Copy,
-    InnerOwned<T, DimMinimum<R, C>>: Copy,
+    OMatrix<T, R, C>: Copy,
+    OVector<T, DimMinimum<R, C>>: Copy,
 {
 }
-*/
-
-impl<T: ComplexField, R: DimMin<C>, C: Dim> Clone for QR<T, R, C>
-where
-    DefaultAllocator: Allocator<T, R, C> + Allocator<T, DimMinimum<R, C>>,
-    InnerOwned<T, R, C>: Clone,
-    InnerOwned<T, DimMinimum<R, C>>: Clone,
-{
-    fn clone(&self) -> Self {
-        Self {
-            qr: self.qr.clone(),
-            diag: self.diag.clone(),
-        }
-    }
-}
-
-impl<T: ComplexField, R: DimMin<C>, C: Dim> fmt::Debug for QR<T, R, C>
-where
-    DefaultAllocator: Allocator<T, R, C> + Allocator<T, DimMinimum<R, C>>,
-    InnerOwned<T, R, C>: fmt::Debug,
-    InnerOwned<T, DimMinimum<R, C>>: fmt::Debug,
-{
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        f.debug_struct("QR")
-            .field("qr", &self.qr)
-            .field("diag", &self.diag)
-            .finish()
-    }
-}
 
 impl<T: ComplexField, R: DimMin<C>, C: Dim> QR<T, R, C>
 where
     DefaultAllocator: Allocator<T, R, C> + Allocator<T, R> + Allocator<T, DimMinimum<R, C>>,
 {
     /// Computes the QR decomposition using householder reflections.
     pub fn new(mut matrix: OMatrix<T, R, C>) -> Self {
-        let (nrows, ncols) = matrix.data.shape();
+        let (nrows, ncols) = matrix.shape_generic();
         let min_nrows_ncols = nrows.min(ncols);
 
-        let mut diag = Matrix::new_uninitialized_generic(min_nrows_ncols, Const::<1>);
-
         if min_nrows_ncols.value() == 0 {
-            return Self {
+            return QR {
                 qr: matrix,
-                diag: unsafe { diag.assume_init() },
+                diag: Matrix::zeros_generic(min_nrows_ncols, Const::<1>),
             };
         }
 
+        let mut diag = Matrix::uninit(min_nrows_ncols, Const::<1>);
+
         for i in 0..min_nrows_ncols.value() {
-            // Safety: the pointer is valid for writes, aligned, and uninitialized.
-            unsafe {
-                householder::clear_column_unchecked(&mut matrix, diag[i].as_mut_ptr(), i, 0, None);
-            }
+            diag[i] =
+                MaybeUninit::new(householder::clear_column_unchecked(&mut matrix, i, 0, None));
         }
 
-        // Safety: all values have been initialized.
-        unsafe {
-            Self {
-                qr: matrix,
-                diag: diag.assume_init(),
-            }
-        }
+        // Safety: diag is now fully initialized.
+        let diag = unsafe { diag.assume_init() };
+        QR { qr: matrix, diag }
     }
 
     /// Retrieves the upper trapezoidal submatrix `R` of this decomposition.
@@ -118,7 +81,7 @@ where
     where
         DefaultAllocator: Allocator<T, DimMinimum<R, C>, C>,
     {
-        let (nrows, ncols) = self.qr.data.shape();
+        let (nrows, ncols) = self.qr.shape_generic();
         let mut res = self.qr.rows_generic(0, nrows.min(ncols)).upper_triangle();
         res.set_partial_diagonal(self.diag.iter().map(|e| T::from_real(e.modulus())));
         res
@@ -132,7 +95,7 @@ where
     where
         DefaultAllocator: Reallocator<T, R, C, DimMinimum<R, C>, C>,
     {
-        let (nrows, ncols) = self.qr.data.shape();
+        let (nrows, ncols) = self.qr.shape_generic();
         let mut res = self.qr.resize_generic(nrows.min(ncols), ncols, T::zero());
         res.fill_lower_triangle(T::zero(), 1);
         res.set_partial_diagonal(self.diag.iter().map(|e| T::from_real(e.modulus())));
@@ -145,7 +108,7 @@ where
     where
         DefaultAllocator: Allocator<T, R, DimMinimum<R, C>>,
     {
-        let (nrows, ncols) = self.qr.data.shape();
+        let (nrows, ncols) = self.qr.shape_generic();
 
         // NOTE: we could build the identity matrix and call q_mul on it.
         // Instead we don't, so that we take into account the matrix sparseness.
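// A usage sketch tying `q`, `r`, and the decomposition together
// (illustrative, not part of this patch): Q * R reproduces the input.
fn qr_round_trip() {
    use nalgebra::Matrix3;

    let m = Matrix3::new(2.0, 1.0, 0.0,
                         1.0, 3.0, 1.0,
                         0.0, 1.0, 4.0);
    let qr = m.qr();
    assert!((qr.q() * qr.r() - m).norm() < 1.0e-10);
}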
@@ -297,7 +260,7 @@ where
         );
 
         // TODO: is there a less naive method?
-        let (nrows, ncols) = self.qr.data.shape();
+        let (nrows, ncols) = self.qr.shape_generic();
         let mut res = OMatrix::identity_generic(nrows, ncols);
 
         if self.solve_mut(&mut res) {
diff --git a/src/linalg/schur.rs b/src/linalg/schur.rs
index d4ee2446..953e9953 100644
--- a/src/linalg/schur.rs
+++ b/src/linalg/schur.rs
@@ -1,25 +1,23 @@
 #![allow(clippy::suspicious_operation_groupings)]
 
-use std::cmp;
-use std::mem::MaybeUninit;
-
 #[cfg(feature = "serde-serialize-no-std")]
 use serde::{Deserialize, Serialize};
 
 use approx::AbsDiffEq;
 use num_complex::Complex as NumComplex;
 use simba::scalar::{ComplexField, RealField};
+use std::cmp;
 
 use crate::allocator::Allocator;
-use crate::base::dimension::{Const, Dim, DimDiff, DimSub, Dynamic, U1, U2};
-use crate::base::storage::{InnerOwned, Storage};
-use crate::base::{
-    DefaultAllocator, OMatrix, OVector, SquareMatrix, Unit, Vector2, Vector3,
-};
+use crate::base::dimension::{Const, Dim, DimDiff, DimSub, Dynamic, U1, U2};
+use crate::base::storage::Storage;
+use crate::base::{DefaultAllocator, OMatrix, OVector, SquareMatrix, Unit, Vector2, Vector3};
 
 use crate::geometry::Reflection;
 use crate::linalg::givens::GivensRotation;
 use crate::linalg::householder;
 use crate::linalg::Hessenberg;
+use crate::{Matrix, UninitVector};
+use std::mem::MaybeUninit;
 
 /// Schur decomposition of a square matrix.
 ///
@@ -36,7 +34,7 @@ use crate::linalg::Hessenberg;
                          OMatrix<T, D, D>: Deserialize<'de>"))
 )]
 #[derive(Clone, Debug)]
-pub struct Schur<T: ComplexField, D: Dim>
+pub struct Schur<T: ComplexField, D: Dim>
 where
     DefaultAllocator: Allocator<T, D, D>,
 {
@@ -44,10 +42,10 @@ where
     t: OMatrix<T, D, D>,
 }
 
-impl<T: ComplexField, D: Dim> Copy for Schur<T, D>
+impl<T: ComplexField, D: Dim> Copy for Schur<T, D>
 where
     DefaultAllocator: Allocator<T, D, D>,
-    InnerOwned<T, D, D>: Copy,
+    OMatrix<T, D, D>: Copy,
 {
 }
 
@@ -76,7 +74,7 @@ where
     /// number of iteration is exceeded, `None` is returned. If `niter == 0`, then the algorithm
     /// continues indefinitely until convergence.
     pub fn try_new(m: OMatrix<T, D, D>, eps: T::RealField, max_niter: usize) -> Option<Self> {
-        let mut work = OVector::new_uninitialized_generic(m.data.shape().0, Const::<1>);
+        let mut work = Matrix::zeros_generic(m.shape_generic().0, Const::<1>);
 
         Self::do_decompose(m, &mut work, eps, max_niter, true)
             .map(|(q, t)| Schur { q: q.unwrap(), t })
@@ -84,7 +82,7 @@ where
 
     fn do_decompose(
         mut m: OMatrix<T, D, D>,
-        work: &mut OVector<MaybeUninit<T>, D>,
+        work: &mut OVector<T, D>,
         eps: T::RealField,
         max_niter: usize,
         compute_q: bool,
@@ -94,7 +92,7 @@ where
             "Unable to compute the eigenvectors and eigenvalues of a non-square matrix."
         );
 
-        let dim = m.data.shape().0;
+        let dim = m.shape_generic().0;
 
         // Specialization would make this easier.
         if dim.value() == 0 {
@@ -273,9 +271,7 @@ where
     }
 
     /// Computes the eigenvalues of the decomposed matrix.
-    fn do_eigenvalues(t: &OMatrix<T, D, D>, out: &mut OVector<MaybeUninit<T>, D>) -> bool {
-        // TODO: check dropping stuff.
-
+    fn do_eigenvalues(t: &OMatrix<T, D, D>, out: &mut OVector<T, D>) -> bool {
         let dim = t.nrows();
         let mut m = 0;
 
@@ -283,7 +279,7 @@ where
             let n = m + 1;
 
             if t[(n, m)].is_zero() {
-                out[m] = MaybeUninit::new(t[(m, m)]);
+                out[m] = t[(m, m)];
                 m += 1;
             } else {
                 // Complex eigenvalue.
@@ -292,22 +288,18 @@ where
         }
 
         if m == dim - 1 {
-            out[m] = MaybeUninit::new(t[(m, m)]);
+            out[m] = t[(m, m)];
         }
 
         true
     }
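// Quick usage sketch for the reworked (no longer MaybeUninit-based)
// eigenvalue path (illustrative test, not part of this patch):
fn schur_eigenvalues_sum_to_trace() {
    use nalgebra::Matrix3;

    let m = Matrix3::new(3.0, 1.0, 0.0,
                         1.0, 2.0, 0.0,
                         0.0, 0.0, 1.0);
    // A real symmetric matrix has only real eigenvalues, so this is `Some`.
    let eig = m.schur().eigenvalues().expect("real eigenvalues");
    // The sum of the eigenvalues equals the trace.
    let sum: f64 = eig.iter().sum();
    assert!((sum - m.trace()).abs() < 1.0e-7);
}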
 
     /// Computes the complex eigenvalues of the decomposed matrix.
-    fn do_complex_eigenvalues(
-        t: &OMatrix<T, D, D>,
-        out: &mut OVector<MaybeUninit<NumComplex<T>>, D>,
-    ) where
+    fn do_complex_eigenvalues(t: &OMatrix<T, D, D>, out: &mut UninitVector<NumComplex<T>, D>)
+    where
         T: RealField,
         DefaultAllocator: Allocator<NumComplex<T>, D>,
     {
-        // TODO: check for dropping behavior.
-
         let dim = t.nrows();
         let mut m = 0;
 
@@ -397,9 +389,9 @@ where
     /// Return `None` if some eigenvalues are complex.
     #[must_use]
     pub fn eigenvalues(&self) -> Option<OVector<T, D>> {
-        let mut out = OVector::new_uninitialized_generic(self.t.data.shape().0, Const::<1>);
+        let mut out = Matrix::zeros_generic(self.t.shape_generic().0, Const::<1>);
         if Self::do_eigenvalues(&self.t, &mut out) {
-            Some(unsafe { out.assume_init() })
+            Some(out)
         } else {
             None
         }
@@ -412,8 +404,9 @@ where
         T: RealField,
         DefaultAllocator: Allocator<NumComplex<T>, D>,
     {
-        let mut out = OVector::new_uninitialized_generic(self.t.data.shape().0, Const::<1>);
+        let mut out = Matrix::uninit(self.t.shape_generic().0, Const::<1>);
         Self::do_complex_eigenvalues(&self.t, &mut out);
+        // Safety: out has been fully initialized by do_complex_eigenvalues.
        unsafe { out.assume_init() }
    }
 }
@@ -425,7 +418,7 @@ fn decompose_2x2<T: ComplexField, D: Dim>(
 where
     DefaultAllocator: Allocator<T, D, D>,
 {
-    let dim = m.data.shape().0;
+    let dim = m.shape_generic().0;
     let mut q = None;
     match compute_2x2_basis(&m.fixed_slice::<2, 2>(0, 0)) {
         Some(rot) => {
@@ -519,14 +512,12 @@ where
     /// Computes the eigenvalues of this matrix.
     #[must_use]
     pub fn eigenvalues(&self) -> Option<OVector<T, D>> {
-        // TODO: check drop stuff.
-
         assert!(
             self.is_square(),
             "Unable to compute eigenvalues of a non-square matrix."
         );
 
-        let mut work = OVector::new_uninitialized_generic(self.data.shape().0, Const::<1>);
+        let mut work = Matrix::zeros_generic(self.shape_generic().0, Const::<1>);
 
         // Special case for 2x2 matrices.
         if self.nrows() == 2 {
@@ -535,9 +526,9 @@ where
             let me = self.fixed_slice::<2, 2>(0, 0);
             return match compute_2x2_eigvals(&me) {
                 Some((a, b)) => {
-                    work[0] = MaybeUninit::new(a);
-                    work[1] = MaybeUninit::new(b);
-                    Some(unsafe { work.assume_init() })
+                    work[0] = a;
+                    work[1] = b;
+                    Some(work)
                 }
                 None => None,
             };
@@ -552,8 +543,9 @@ where
             false,
         )
         .unwrap();
+
         if Schur::do_eigenvalues(&schur.1, &mut work) {
-            Some(unsafe { work.assume_init() })
+            Some(work)
         } else {
             None
         }
@@ -567,8 +559,8 @@ where
         T: RealField,
         DefaultAllocator: Allocator<NumComplex<T>, D>,
     {
-        let dim = self.data.shape().0;
-        let mut work = OVector::new_uninitialized_generic(dim, Const::<1>);
+        let dim = self.shape_generic().0;
+        let mut work = Matrix::zeros_generic(dim, Const::<1>);
 
         let schur = Schur::do_decompose(
             self.clone_owned(),
@@ -578,8 +570,9 @@ where
             false,
         )
         .unwrap();
-        let mut eig = OVector::new_uninitialized_generic(dim, Const::<1>);
+        let mut eig = Matrix::uninit(dim, Const::<1>);
         Schur::do_complex_eigenvalues(&schur.1, &mut eig);
+        // Safety: eig has been fully initialized by do_complex_eigenvalues.
         unsafe { eig.assume_init() }
     }
 }
diff --git a/src/linalg/svd.rs b/src/linalg/svd.rs
index 355d1569..0b50fc9b 100644
--- a/src/linalg/svd.rs
+++ b/src/linalg/svd.rs
@@ -7,8 +7,8 @@ use num::{One, Zero};
 use crate::allocator::Allocator;
 use crate::base::{DefaultAllocator, Matrix, Matrix2x3, OMatrix, OVector, Vector2};
 use crate::constraint::{SameNumberOfRows, ShapeConstraint};
-use crate::dimension::{Dim, DimDiff, DimMin, DimMinimum, DimName, DimSub, U1};
-use crate::storage::{InnerOwned, Storage};
+use crate::dimension::{Dim, DimDiff, DimMin, DimMinimum, DimSub, U1};
+use crate::storage::Storage;
 use simba::scalar::{ComplexField, RealField};
 
 use crate::linalg::givens::GivensRotation;
@@ -54,14 +54,14 @@ where
     pub singular_values: OVector<T::RealField, DimMinimum<R, C>>,
 }
 
-impl<T: ComplexField, R: DimMin<C>, C: DimName> Copy for SVD<T, R, C>
+impl<T: ComplexField, R: DimMin<C>, C: Dim> Copy for SVD<T, R, C>
 where
     DefaultAllocator: Allocator<T, DimMinimum<R, C>, C>
         + Allocator<T, R, DimMinimum<R, C>>
         + Allocator<T::RealField, DimMinimum<R, C>>,
-    InnerOwned<T, R, DimMinimum<R, C>>: Copy,
-    InnerOwned<T, DimMinimum<R, C>, C>: Copy,
-    InnerOwned<T::RealField, DimMinimum<R, C>>: Copy,
+    OMatrix<T, R, DimMinimum<R, C>>: Copy,
+    OMatrix<T, DimMinimum<R, C>, C>: Copy,
+    OVector<T::RealField, DimMinimum<R, C>>: Copy,
 {
 }
 
@@ -111,7 +111,7 @@ where
             !matrix.is_empty(),
             "Cannot compute the SVD of an empty matrix."
         );
-        let (nrows, ncols) = matrix.data.shape();
+        let (nrows, ncols) = matrix.shape_generic();
         let min_nrows_ncols = nrows.min(ncols);
         let dim = min_nrows_ncols.value();
diff --git a/src/linalg/symmetric_eigen.rs b/src/linalg/symmetric_eigen.rs
index df32cdac..5ac6d5da 100644
--- a/src/linalg/symmetric_eigen.rs
+++ b/src/linalg/symmetric_eigen.rs
@@ -1,5 +1,3 @@
-use std::fmt;
-
 #[cfg(feature = "serde-serialize-no-std")]
 use serde::{Deserialize, Serialize};
 
@@ -8,8 +6,8 @@ use num::Zero;
 
 use crate::allocator::Allocator;
 use crate::base::{DefaultAllocator, Matrix2, OMatrix, OVector, SquareMatrix, Vector2};
-use crate::dimension::{Dim, DimDiff, DimName, DimSub, U1};
-use crate::storage::{InnerOwned, Storage};
+use crate::dimension::{Dim, DimDiff, DimSub, U1};
+use crate::storage::Storage;
 use simba::scalar::ComplexField;
 
 use crate::linalg::givens::GivensRotation;
@@ -31,6 +29,7 @@ use crate::linalg::SymmetricTridiagonal;
                          OVector<T::RealField, D>: Deserialize<'de>,
                          OMatrix<T, D, D>: Deserialize<'de>"))
 )]
+#[derive(Clone, Debug)]
 pub struct SymmetricEigen<T: ComplexField, D: Dim>
 where
     DefaultAllocator: Allocator<T, D, D> + Allocator<T::RealField, D>,
 {
@@ -42,42 +41,14 @@ where
     pub eigenvalues: OVector<T::RealField, D>,
 }
 
-impl<T: ComplexField, D: Dim> Copy for SymmetricEigen<T, D>
+impl<T: ComplexField, D: Dim> Copy for SymmetricEigen<T, D>
 where
     DefaultAllocator: Allocator<T, D, D> + Allocator<T::RealField, D>,
-    InnerOwned<T, D, D>: Copy,
-    InnerOwned<T::RealField, D>: Copy,
+    OMatrix<T, D, D>: Copy,
+    OVector<T::RealField, D>: Copy,
 {
 }
 
-impl<T: ComplexField, D: Dim> Clone for SymmetricEigen<T, D>
-where
-    DefaultAllocator: Allocator<T, D, D> + Allocator<T::RealField, D>,
-    InnerOwned<T, D, D>: Clone,
-    InnerOwned<T::RealField, D>: Clone,
-{
-    fn clone(&self) -> Self {
-        Self {
-            eigenvectors: self.eigenvectors.clone(),
-            eigenvalues: self.eigenvalues.clone(),
-        }
-    }
-}
-
-impl<T: ComplexField, D: Dim> fmt::Debug for SymmetricEigen<T, D>
-where
-    DefaultAllocator: Allocator<T, D, D> + Allocator<T::RealField, D>,
-    InnerOwned<T, D, D>: fmt::Debug,
-    InnerOwned<T::RealField, D>: fmt::Debug,
-{
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        f.debug_struct("SymmetricEigen")
-            .field("eigenvectors", &self.eigenvectors)
-            .field("eigenvalues", &self.eigenvalues)
-            .finish()
-    }
-}
-
 impl<T: ComplexField, D: Dim> SymmetricEigen<T, D>
 where
     DefaultAllocator: Allocator<T, D, D> + Allocator<T::RealField, D>,
 {
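// With the extra `InnerOwned: Clone` bound gone (see `recompose` below),
// reconstruction is available whenever the decomposition itself is. A
// round-trip sketch (illustrative, not part of this patch):
fn symmetric_eigen_recompose_round_trip() {
    use nalgebra::Matrix2;

    let m = Matrix2::new(2.0, 1.0,
                         1.0, 2.0);
    let eig = m.symmetric_eigen();
    // recompose() rebuilds Q * diag(eigenvalues) * Q^H.
    assert!((m - eig.recompose()).norm() < 1.0e-10);
}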
@@ -299,10 +270,7 @@ where
     ///
     /// This is useful if some of the eigenvalues have been manually modified.
     #[must_use]
-    pub fn recompose(&self) -> OMatrix<T, D, D>
-    where
-        InnerOwned<T, D, D>: Clone,
-    {
+    pub fn recompose(&self) -> OMatrix<T, D, D> {
         let mut u_t = self.eigenvectors.clone();
         for i in 0..self.eigenvalues.len() {
             let val = self.eigenvalues[i];
diff --git a/src/linalg/symmetric_tridiagonal.rs b/src/linalg/symmetric_tridiagonal.rs
index f074b0eb..e071a916 100644
--- a/src/linalg/symmetric_tridiagonal.rs
+++ b/src/linalg/symmetric_tridiagonal.rs
@@ -1,16 +1,14 @@
-use std::fmt;
-use std::mem::MaybeUninit;
-
 #[cfg(feature = "serde-serialize-no-std")]
 use serde::{Deserialize, Serialize};
 
 use crate::allocator::Allocator;
 use crate::base::{DefaultAllocator, OMatrix, OVector};
-use crate::dimension::{Const, DimDiff, DimName, DimSub, U1};
-use crate::storage::{InnerOwned, Storage};
+use crate::dimension::{Const, DimDiff, DimSub, U1};
 use simba::scalar::ComplexField;
 
 use crate::linalg::householder;
+use crate::Matrix;
+use std::mem::MaybeUninit;
 
 /// Tridiagonalization of a symmetric matrix.
 #[cfg_attr(feature = "serde-serialize-no-std", derive(Serialize, Deserialize))]
@@ -28,7 +26,8 @@ use crate::linalg::householder;
                          OMatrix<T, D, D>: Deserialize<'de>,
                          OVector<T, DimDiff<D, U1>>: Deserialize<'de>"))
 )]
-pub struct SymmetricTridiagonal<T: ComplexField, D: DimSub<U1>>
+#[derive(Clone, Debug)]
+pub struct SymmetricTridiagonal<T: ComplexField, D: DimSub<U1>>
 where
     DefaultAllocator: Allocator<T, D, D> + Allocator<T, DimDiff<D, U1>>,
 {
@@ -36,42 +35,14 @@ where
     off_diagonal: OVector<T, DimDiff<D, U1>>,
 }
 
-impl<T: ComplexField, D: DimSub<U1> + DimName> Copy for SymmetricTridiagonal<T, D>
+impl<T: ComplexField, D: DimSub<U1>> Copy for SymmetricTridiagonal<T, D>
 where
     DefaultAllocator: Allocator<T, D, D> + Allocator<T, DimDiff<D, U1>>,
-    InnerOwned<T, D, D>: Copy,
-    InnerOwned<T, DimDiff<D, U1>>: Copy,
+    OMatrix<T, D, D>: Copy,
+    OVector<T, DimDiff<D, U1>>: Copy,
 {
 }
 
-impl<T: ComplexField, D: DimSub<U1>> Clone for SymmetricTridiagonal<T, D>
-where
-    DefaultAllocator: Allocator<T, D, D> + Allocator<T, DimDiff<D, U1>>,
-    InnerOwned<T, D, D>: Clone,
-    InnerOwned<T, DimDiff<D, U1>>: Clone,
-{
-    fn clone(&self) -> Self {
-        Self {
-            tri: self.tri.clone(),
-            off_diagonal: self.off_diagonal.clone(),
-        }
-    }
-}
-
-impl<T: ComplexField, D: DimSub<U1>> fmt::Debug for SymmetricTridiagonal<T, D>
-where
-    DefaultAllocator: Allocator<T, D, D> + Allocator<T, DimDiff<D, U1>>,
-    InnerOwned<T, D, D>: fmt::Debug,
-    InnerOwned<T, DimDiff<D, U1>>: fmt::Debug,
-{
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        f.debug_struct("SymmetricTridiagonal")
-            .field("tri", &self.tri)
-            .field("off_diagonal", &self.off_diagonal)
-            .finish()
-    }
-}
-
 impl<T: ComplexField, D: DimSub<U1>> SymmetricTridiagonal<T, D>
 where
     DefaultAllocator: Allocator<T, D, D> + Allocator<T, DimDiff<D, U1>>,
@@ -80,7 +51,7 @@ where
     ///
     /// Only the lower-triangular part (including the diagonal) of `m` is read.
     pub fn new(mut m: OMatrix<T, D, D>) -> Self {
-        let dim = m.data.shape().0;
+        let dim = m.shape_generic().0;
 
         assert!(
             m.is_square(),
@@ -91,8 +62,8 @@ where
             "Unable to compute the symmetric tridiagonal decomposition of an empty matrix."
         );
 
-        let mut off_diagonal = OVector::new_uninitialized_generic(dim.sub(Const::<1>), Const::<1>);
-        let mut p = OVector::new_uninitialized_generic(dim.sub(Const::<1>), Const::<1>);
+        let mut off_diagonal = Matrix::uninit(dim.sub(Const::<1>), Const::<1>);
+        let mut p = Matrix::zeros_generic(dim.sub(Const::<1>), Const::<1>);
 
         for i in 0..dim.value() - 1 {
             let mut m = m.rows_range_mut(i + 1..);
@@ -104,8 +75,7 @@ where
             if not_zero {
                 let mut p = p.rows_range_mut(i..);
 
-                p.hegemv_z(crate::convert(2.0), &m, &axis);
-                let p = unsafe { p.slice_assume_init() };
+                p.hegemv(crate::convert(2.0), &m, &axis, T::zero());
 
                 let dot = axis.dotc(&p);
                 m.hegerc(-T::one(), &p, &axis, T::one());
@@ -114,9 +84,11 @@ where
             }
         }
 
+        // Safety: off_diagonal has been fully initialized.
+        let off_diagonal = unsafe { off_diagonal.assume_init() };
         Self {
             tri: m,
-            off_diagonal: unsafe { off_diagonal.assume_init() },
+            off_diagonal,
         }
     }
diff --git a/src/linalg/udu.rs b/src/linalg/udu.rs
index 5d78951b..546fa95a 100644
--- a/src/linalg/udu.rs
+++ b/src/linalg/udu.rs
@@ -1,12 +1,9 @@
-use std::fmt;
-
 #[cfg(feature = "serde-serialize-no-std")]
 use serde::{Deserialize, Serialize};
 
 use crate::allocator::Allocator;
 use crate::base::{Const, DefaultAllocator, OMatrix, OVector};
-use crate::dimension::{Dim, DimName};
-use crate::storage::{InnerOwned, Storage};
+use crate::dimension::Dim;
 use simba::scalar::RealField;
 
 /// UDU factorization.
@@ -21,7 +18,8 @@ use simba::scalar::RealField;
         deserialize = "OVector<T, D>: Deserialize<'de>, OMatrix<T, D, D>: Deserialize<'de>"
     ))
 )]
-pub struct UDU<T: RealField, D: Dim>
+#[derive(Clone, Debug)]
+pub struct UDU<T: RealField, D: Dim>
 where
     DefaultAllocator: Allocator<T, D> + Allocator<T, D, D>,
 {
@@ -31,42 +29,14 @@ where
     pub d: OVector<T, D>,
 }
 
-impl<T: RealField, D: Dim> Copy for UDU<T, D>
+impl<T: RealField, D: Dim> Copy for UDU<T, D>
 where
     DefaultAllocator: Allocator<T, D> + Allocator<T, D, D>,
-    InnerOwned<T, D>: Copy,
-    InnerOwned<T, D, D>: Copy,
+    OVector<T, D>: Copy,
+    OMatrix<T, D, D>: Copy,
 {
 }
 
-impl<T: RealField, D: Dim> Clone for UDU<T, D>
-where
-    DefaultAllocator: Allocator<T, D> + Allocator<T, D, D>,
-    InnerOwned<T, D>: Clone,
-    InnerOwned<T, D, D>: Clone,
-{
-    fn clone(&self) -> Self {
-        Self {
-            u: self.u.clone(),
-            d: self.d.clone(),
-        }
-    }
-}
-
-impl<T: RealField, D: Dim> fmt::Debug for UDU<T, D>
-where
-    DefaultAllocator: Allocator<T, D> + Allocator<T, D, D>,
-    InnerOwned<T, D>: fmt::Debug,
-    InnerOwned<T, D, D>: fmt::Debug,
-{
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        f.debug_struct("UDU")
-            .field("u", &self.u)
-            .field("d", &self.d)
-            .finish()
-    }
-}
-
 impl<T: RealField, D: Dim> UDU<T, D>
 where
     DefaultAllocator: Allocator<T, D> + Allocator<T, D, D>,
@@ -79,7 +49,7 @@ where
     /// Ref.: "Optimal control and estimation-Dover Publications", Robert F. Stengel, (1994) page 360
     pub fn new(p: OMatrix<T, D, D>) -> Option<Self> {
         let n = p.ncols();
-        let n_dim = p.data.shape().1;
+        let n_dim = p.shape_generic().1;
 
         let mut d = OVector::zeros_generic(n_dim, Const::<1>);
         let mut u = OMatrix::zeros_generic(n_dim, n_dim);
diff --git a/src/proptest/mod.rs b/src/proptest/mod.rs
index e910bdf0..a7cbe549 100644
--- a/src/proptest/mod.rs
+++ b/src/proptest/mod.rs
@@ -263,7 +263,7 @@ where
 }
 
 /// Same as `matrix`, but without the additional anonymous generic types
-fn matrix_<ScalarStrategy, R: Dim, C: Dim>(
+fn matrix_<ScalarStrategy, R, C>(
     value_strategy: ScalarStrategy,
     rows: DimRange<R>,
     cols: DimRange<C>,
) -> MatrixStrategy<ScalarStrategy, R, C>
@@ -271,6 +271,8 @@ where
 where
     ScalarStrategy: Strategy + Clone + 'static,
     ScalarStrategy::Value: Scalar,
+    R: Dim,
+    C: Dim,
     DefaultAllocator: Allocator<ScalarStrategy::Value, R, C>,
 {
     let nrows = rows.lower_bound().value()..=rows.upper_bound().value();
@@ -330,7 +332,12 @@ where
     matrix_(value_strategy, length.into(), Const::<1>.into())
 }
 
-impl<NParameters: Default, R: DimName, C: DimName> Default for MatrixParameters<NParameters, R, C> {
+impl<NParameters, R, C> Default for MatrixParameters<NParameters, R, C>
+where
+    NParameters: Default,
+    R: DimName,
+    C: DimName,
+{
     fn default() -> Self {
         Self {
             rows: DimRange::from(R::name()),
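// UDU usage is unchanged by this patch (only the derives and the
// `shape_generic` call moved); a round-trip sketch (illustrative only):
fn udu_round_trip() {
    use nalgebra::Matrix3;

    let p = Matrix3::new(4.0, 1.0, 0.0,
                         1.0, 3.0, 1.0,
                         0.0, 1.0, 2.0);
    let udu = p.udu().expect("symmetric positive-definite input");
    // P = U * D * U^T with U unit upper-triangular and D diagonal.
    let rebuilt = udu.u * Matrix3::from_diagonal(&udu.d) * udu.u.transpose();
    assert!((p - rebuilt).norm() < 1.0e-10);
}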
diff --git a/src/sparse/cs_matrix.rs b/src/sparse/cs_matrix.rs
index 4a1a3f83..c717e90e 100644
--- a/src/sparse/cs_matrix.rs
+++ b/src/sparse/cs_matrix.rs
@@ -7,7 +7,7 @@ use std::slice;
 
 use crate::allocator::Allocator;
 use crate::sparse::cs_utils;
-use crate::{Const, DefaultAllocator, Dim, Dynamic, OVector, Scalar, Vector, U1};
+use crate::{Const, DefaultAllocator, Dim, Dynamic, Matrix, OVector, Scalar, Vector, U1};
 
 pub struct ColumnEntries<'a, T> {
     curr: usize,
@@ -263,10 +263,6 @@ where
     /// `nvals` possible non-zero values.
     pub fn new_uninitialized_generic(nrows: R, ncols: C, nvals: usize) -> Self {
         let mut i = Vec::with_capacity(nvals);
-
-        // IMPORTANT TODO: this method is still UB, and we should decide how to
-        // update the API to take it into account.
-
         unsafe {
             i.set_len(nvals);
         }
@@ -474,7 +470,7 @@ where
     {
         // Size = R
         let nrows = self.data.shape().0;
-        let mut workspace = CsMatrix::new_uninitialized_generic(nrows, Const::<1>);
+        let mut workspace = Matrix::zeros_generic(nrows, Const::<1>);
         self.sort_with_workspace(workspace.as_mut_slice());
     }
diff --git a/src/sparse/cs_matrix_cholesky.rs b/src/sparse/cs_matrix_cholesky.rs
index cd8bf975..ff9ca023 100644
--- a/src/sparse/cs_matrix_cholesky.rs
+++ b/src/sparse/cs_matrix_cholesky.rs
@@ -3,7 +3,7 @@ use std::mem;
 
 use crate::allocator::Allocator;
 use crate::sparse::{CsMatrix, CsStorage, CsStorageIter, CsStorageIterMut, CsVecStorage};
-use crate::{Const, DefaultAllocator, Dim, OVector, RealField};
+use crate::{Const, DefaultAllocator, Dim, Matrix, OVector, RealField};
 
 /// The cholesky decomposition of a column compressed sparse matrix.
 pub struct CsCholesky<T: RealField, D: Dim>
@@ -48,8 +48,8 @@ where
         let (l, u) = Self::nonzero_pattern(m);
 
         // Workspaces.
-        let work_x = Matrix::new_uninitialized_generic(m.data.shape().0, Const::<1>);
-        let work_c = Matrix::new_uninitialized_generic(m.data.shape().1, Const::<1>);
+        let work_x = Matrix::zeros_generic(m.data.shape().0, Const::<1>);
+        let work_c = Matrix::zeros_generic(m.data.shape().1, Const::<1>);
 
         let mut original_p = m.data.p.as_slice().to_vec();
         original_p.push(m.data.i.len());
@@ -292,7 +292,7 @@ where
         let etree = Self::elimination_tree(m);
         let (nrows, ncols) = m.data.shape();
         let mut rows = Vec::with_capacity(m.len());
-        let mut cols = Matrix::new_uninitialized_generic(m.data.shape().0, Const::<1>);
+        let mut cols = Matrix::zeros_generic(m.data.shape().0, Const::<1>);
         let mut marks = Vec::new();
 
         // NOTE: the following will actually compute the non-zero pattern of
diff --git a/src/sparse/cs_matrix_ops.rs b/src/sparse/cs_matrix_ops.rs
index 2170f5d2..fba5d41b 100644
--- a/src/sparse/cs_matrix_ops.rs
+++ b/src/sparse/cs_matrix_ops.rs
@@ -6,7 +6,7 @@ use crate::allocator::Allocator;
 use crate::constraint::{AreMultipliable, DimEq, ShapeConstraint};
 use crate::sparse::{CsMatrix, CsStorage, CsStorageMut, CsVector};
 use crate::storage::StorageMut;
-use crate::{Const, DefaultAllocator, Dim, OVector, Scalar, Vector};
+use crate::{Const, DefaultAllocator, Dim, Matrix, OVector, Scalar, Vector};
 
 impl<T: Scalar, R: Dim, C: Dim, S: CsStorage<T, R, C>> CsMatrix<T, R, C, S> {
     fn scatter<R2: Dim, C2: Dim>(
@@ -242,7 +242,7 @@ where
         let mut res = CsMatrix::new_uninitialized_generic(nrows1, ncols2, self.len() + rhs.len());
         let mut timestamps = OVector::zeros_generic(nrows1, Const::<1>);
-        let mut workspace = Matrix::new_uninitialized_generic(nrows1, Const::<1>);
+        let mut workspace = Matrix::zeros_generic(nrows1, Const::<1>);
         let mut nz = 0;
 
         for j in 0..ncols2.value() {
diff --git a/src/sparse/cs_matrix_solve.rs b/src/sparse/cs_matrix_solve.rs
index 092ad15b..6136a0f8 100644
--- a/src/sparse/cs_matrix_solve.rs
+++ b/src/sparse/cs_matrix_solve.rs
@@ -152,7 +152,7 @@ impl<T: RealField, D: Dim, S: CsStorage<T, D, D>> CsMatrix<T, D, D, S> {
         self.lower_triangular_reach(b, &mut reach);
         // We sort the reach so the result matrix has sorted indices.
         reach.sort_unstable();
 
-        let mut workspace = Matrix::new_uninitialized_generic(b.data.shape().0, Const::<1>);
+        let mut workspace = Matrix::zeros_generic(b.data.shape().0, Const::<1>);
 
         for i in reach.iter().cloned() {
             workspace[i] = T::zero();
diff --git a/src/third_party/alga/alga_matrix.rs b/src/third_party/alga/alga_matrix.rs
index f80b021a..6a4cb982 100644
--- a/src/third_party/alga/alga_matrix.rs
+++ b/src/third_party/alga/alga_matrix.rs
@@ -15,8 +15,9 @@ use alga::linear::{
 
 use crate::base::allocator::Allocator;
 use crate::base::dimension::{Dim, DimName};
-use crate::base::storage::{Storage, StorageMut};
-use crate::base::{DefaultAllocator, OMatrix, Scalar};
+use crate::base::storage::{RawStorage, RawStorageMut};
+use crate::base::{DefaultAllocator, Matrix, OMatrix, Scalar};
+use std::mem::MaybeUninit;
 
 /*
  *
@@ -427,14 +428,14 @@ where
 {
     #[inline]
     fn meet_join(&self, other: &Self) -> (Self, Self) {
-        let shape = self.data.shape();
+        let shape = self.shape_generic();
         assert!(
-            shape == other.data.shape(),
+            shape == other.shape_generic(),
             "Matrix meet/join error: mismatched dimensions."
         );
 
-        let mut mres = Matrix::new_uninitialized_generic(shape.0, shape.1);
-        let mut jres = Matrix::new_uninitialized_generic(shape.0, shape.1);
+        let mut mres = Matrix::uninit(shape.0, shape.1);
+        let mut jres = Matrix::uninit(shape.0, shape.1);
 
         for i in 0..shape.0.value() * shape.1.value() {
             unsafe {
@@ -442,11 +443,12 @@ where
                     .data
                     .get_unchecked_linear(i)
                     .meet_join(other.data.get_unchecked_linear(i));
-                *mres.data.get_unchecked_linear_mut(i) = mj.0;
-                *jres.data.get_unchecked_linear_mut(i) = mj.1;
+                *mres.data.get_unchecked_linear_mut(i) = MaybeUninit::new(mj.0);
+                *jres.data.get_unchecked_linear_mut(i) = MaybeUninit::new(mj.1);
             }
         }
-        (mres, jres)
+        // Safety: both mres and jres are now completely initialized.
+        unsafe { (mres.assume_init(), jres.assume_init()) }
     }
 }
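// The `Storage` -> `RawStorage` renames below reflect the new trait split:
// `RawStorage` only exposes the raw buffer, with no allocator coupling, so
// read-only generic code needs nothing more. A minimal sketch (the helper
// itself is hypothetical, not nalgebra API):
fn first_entry<T, R, C, S>(m: &nalgebra::Matrix<T, R, C, S>) -> T
where
    T: nalgebra::Scalar,
    R: nalgebra::Dim,
    C: nalgebra::Dim,
    S: nalgebra::storage::RawStorage<T, R, C>,
{
    // Panics on empty matrices, which is fine for a sketch.
    m[(0, 0)].clone()
}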
diff --git a/src/third_party/glam/common/glam_matrix.rs b/src/third_party/glam/common/glam_matrix.rs
index 77b68b5e..80f88054 100644
--- a/src/third_party/glam/common/glam_matrix.rs
+++ b/src/third_party/glam/common/glam_matrix.rs
@@ -2,7 +2,7 @@ use super::glam::{
     BVec2, BVec3, BVec4, DMat2, DMat3, DMat4, DVec2, DVec3, DVec4, IVec2, IVec3, IVec4, Mat2,
     Mat3, Mat4, UVec2, UVec3, UVec4, Vec2, Vec3, Vec3A, Vec4,
 };
-use crate::storage::Storage;
+use crate::storage::RawStorage;
 use crate::{Matrix, Matrix2, Matrix3, Matrix4, Vector, Vector2, Vector3, Vector4, U2, U3, U4};
 
 macro_rules! impl_vec_conversion(
@@ -16,7 +16,7 @@ macro_rules! impl_vec_conversion(
 
         impl<S> From<Vector<$N, U2, S>> for $Vec2
         where
-            S: Storage<$N, U2>,
+            S: RawStorage<$N, U2>,
         {
             #[inline]
             fn from(e: Vector<$N, U2, S>) -> $Vec2 {
@@ -33,7 +33,7 @@ macro_rules! impl_vec_conversion(
 
         impl<S> From<Vector<$N, U3, S>> for $Vec3
         where
-            S: Storage<$N, U3>,
+            S: RawStorage<$N, U3>,
         {
             #[inline]
             fn from(e: Vector<$N, U3, S>) -> $Vec3 {
@@ -50,7 +50,7 @@ macro_rules! impl_vec_conversion(
 
         impl<S> From<Vector<$N, U4, S>> for $Vec4
         where
-            S: Storage<$N, U4>,
+            S: RawStorage<$N, U4>,
         {
             #[inline]
             fn from(e: Vector<$N, U4, S>) -> $Vec4 {
@@ -75,7 +75,7 @@ impl From<Vec3A> for Vector3<f32> {
 
 impl<S> From<Vector<f32, U3, S>> for Vec3A
 where
-    S: Storage<f32, U3>,
+    S: RawStorage<f32, U3>,
 {
     #[inline]
     fn from(e: Vector<f32, U3, S>) -> Vec3A {
@@ -92,7 +92,7 @@ impl From<Mat2> for Matrix2<f32> {
 
 impl<S> From<Matrix<f32, U2, U2, S>> for Mat2
 where
-    S: Storage<f32, U2, U2>,
+    S: RawStorage<f32, U2, U2>,
 {
     #[inline]
     fn from(e: Matrix<f32, U2, U2, S>) -> Mat2 {
@@ -112,7 +112,7 @@ impl From<Mat3> for Matrix3<f32> {
 
 impl<S> From<Matrix<f32, U3, U3, S>> for Mat3
 where
-    S: Storage<f32, U3, U3>,
+    S: RawStorage<f32, U3, U3>,
 {
     #[inline]
     fn from(e: Matrix<f32, U3, U3, S>) -> Mat3 {
@@ -133,7 +133,7 @@ impl From<Mat4> for Matrix4<f32> {
 
 impl<S> From<Matrix<f32, U4, U4, S>> for Mat4
 where
-    S: Storage<f32, U4, U4>,
+    S: RawStorage<f32, U4, U4>,
 {
     #[inline]
     fn from(e: Matrix<f32, U4, U4, S>) -> Mat4 {
@@ -155,7 +155,7 @@ impl From<DMat2> for Matrix2<f64> {
 
 impl<S> From<Matrix<f64, U2, U2, S>> for DMat2
 where
-    S: Storage<f64, U2, U2>,
+    S: RawStorage<f64, U2, U2>,
 {
     #[inline]
     fn from(e: Matrix<f64, U2, U2, S>) -> DMat2 {
@@ -175,7 +175,7 @@ impl From<DMat3> for Matrix3<f64> {
 
 impl<S> From<Matrix<f64, U3, U3, S>> for DMat3
 where
-    S: Storage<f64, U3, U3>,
+    S: RawStorage<f64, U3, U3>,
 {
     #[inline]
     fn from(e: Matrix<f64, U3, U3, S>) -> DMat3 {
@@ -196,7 +196,7 @@ impl From<DMat4> for Matrix4<f64> {
 
 impl<S> From<Matrix<f64, U4, U4, S>> for DMat4
 where
-    S: Storage<f64, U4, U4>,
+    S: RawStorage<f64, U4, U4>,
 {
     #[inline]
     fn from(e: Matrix<f64, U4, U4, S>) -> DMat4 {
diff --git a/src/third_party/mint/mint_matrix.rs b/src/third_party/mint/mint_matrix.rs
index 1e0a4d54..73d0a936 100644
--- a/src/third_party/mint/mint_matrix.rs
+++ b/src/third_party/mint/mint_matrix.rs
@@ -4,7 +4,7 @@ use std::ptr;
 
 use crate::base::allocator::Allocator;
 use crate::base::dimension::{U1, U2, U3, U4};
-use crate::base::storage::{ContiguousStorage, ContiguousStorageMut, Storage, StorageMut};
+use crate::base::storage::{IsContiguous, RawStorage, RawStorageMut};
 use crate::base::{DefaultAllocator, Matrix, OMatrix, Scalar};
 
 macro_rules! impl_from_into_mint_1D(
@@ -25,9 +25,10 @@ macro_rules! impl_from_into_mint_1D(
         impl<T, S> Into<mint::$VT<T>> for Matrix<T, $NRows, U1, S>
         where T: Scalar,
-              S: ContiguousStorage<T, $NRows, U1> {
+              S: RawStorage<T, $NRows, U1> + IsContiguous {
             #[inline]
             fn into(self) -> mint::$VT<T> {
+                // SAFETY: this is OK thanks to the IsContiguous bound.
                 unsafe {
                     let mut res: mint::$VT<T> = mem::MaybeUninit::uninit().assume_init();
                     ptr::copy_nonoverlapping(self.data.ptr(), &mut res.x, $SZ);
@@ -38,9 +39,10 @@ macro_rules! impl_from_into_mint_1D(
         impl<T, S> AsRef<mint::$VT<T>> for Matrix<T, $NRows, U1, S>
         where T: Scalar,
-              S: ContiguousStorage<T, $NRows, U1> {
+              S: RawStorage<T, $NRows, U1> + IsContiguous {
             #[inline]
             fn as_ref(&self) -> &mint::$VT<T> {
+                // SAFETY: this is OK thanks to the IsContiguous bound.
                 unsafe {
                     mem::transmute(self.data.ptr())
                 }
@@ -49,9 +51,10 @@ macro_rules! impl_from_into_mint_1D(
         impl<T, S> AsMut<mint::$VT<T>> for Matrix<T, $NRows, U1, S>
         where T: Scalar,
-              S: ContiguousStorageMut<T, $NRows, U1> {
+              S: RawStorageMut<T, $NRows, U1> + IsContiguous {
             #[inline]
             fn as_mut(&mut self) -> &mut mint::$VT<T> {
+                // SAFETY: this is OK thanks to the IsContiguous bound.
                 unsafe {
                     mem::transmute(self.data.ptr_mut())
                 }
diff --git a/src/third_party/mint/mint_point.rs b/src/third_party/mint/mint_point.rs
index fbce1c88..45f85e3c 100644
--- a/src/third_party/mint/mint_point.rs
+++ b/src/third_party/mint/mint_point.rs
@@ -1,4 +1,4 @@
-use crate::base::storage::{Storage, StorageMut};
+use crate::base::storage::{RawStorage, RawStorageMut};
 use crate::{OVector, Point, Scalar};
 use std::convert::{AsMut, AsRef};
 
diff --git a/src/third_party/mint/mint_quaternion.rs b/src/third_party/mint/mint_quaternion.rs
index 49b99f04..f41815ce 100644
--- a/src/third_party/mint/mint_quaternion.rs
+++ b/src/third_party/mint/mint_quaternion.rs
@@ -1,6 +1,6 @@
 use crate::{Quaternion, Scalar, SimdValue, UnitQuaternion};
 
-impl<T: Scalar> From<mint::Quaternion<T>> for Quaternion<T> {
+impl<T> From<mint::Quaternion<T>> for Quaternion<T> {
     fn from(q: mint::Quaternion<T>) -> Self {
         Self::new(q.s, q.v.x, q.v.y, q.v.z)
     }
diff --git a/tests/core/matrix.rs b/tests/core/matrix.rs
index eaa252db..4a35fb20 100644
--- a/tests/core/matrix.rs
+++ b/tests/core/matrix.rs
@@ -447,7 +447,7 @@ fn apply() {
         1.0, 2.0, 3.0, 4.0, 6.0, 7.0, 8.0, 9.0, 10.0, 9.0, 8.0, 7.0, 6.0, 4.0, 3.0, 2.0,
     );
 
-    a.apply(|e| e.round());
+    a.apply(|e| *e = e.round());
 
     assert_eq!(a, expected);
 }
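// The test above exercises the 0.29 `apply` signature change: the closure now
// mutates each entry through `&mut T` instead of returning a replacement
// value. In ordinary user code (illustrative):
fn apply_in_place() {
    use nalgebra::Matrix2;

    let mut m = Matrix2::new(1.0, 2.0,
                             3.0, 4.0);
    m.apply(|e| *e *= 10.0);
    assert_eq!(m, Matrix2::new(10.0, 20.0, 30.0, 40.0));
}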